diff --git a/.gitignore b/.gitignore index 9f98467261..65c27ae57b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,171 +1,176 @@ # Common \#* .\#* GPATH GRTAGS GTAGS TAGS Makefile Makefile.in .deps .libs *.pc *.pyc *.bz2 *.tar.gz *.rpm *.la *.lo *.o *~ *.gcda *.gcno # Autobuild aclocal.m4 autoconf autoheader autom4te.cache/ automake build.counter compile config.guess config.log config.status config.sub configure depcomp install-sh include/stamp-* libtool libtool.m4 ltdl.m4 libltdl ltmain.sh missing py-compile /m4/argz.m4 /m4/ltargz.m4 /m4/ltoptions.m4 /m4/ltsugar.m4 /m4/ltversion.m4 /m4/lt~obsolete.m4 test-driver ylwrap # Configure targets Doxyfile coverage.sh +cts/CTSlab.py cts/CTSvars.py cts/LSBDummy +cts/OCFIPraTest.py cts/benchmark/clubench +cts/fence_dummy cts/lxc_autogen.sh extra/logrotate/pacemaker +fencing/regression.py include/config.h include/config.h.in include/crm_config.h lrmd/pacemaker_remote lrmd/pacemaker_remoted lrmd/pacemaker_remote.service +lrmd/regression.py mcp/pacemaker mcp/pacemaker.combined.upstart mcp/pacemaker.service mcp/pacemaker.upstart pengine/regression.core.sh publican.cfg tools/cibsecret tools/coverage.sh tools/crm_error tools/crm_mon.service tools/crm_mon.upstart tools/crm_report tools/report.common -lrmd/regression.py -fencing/regression.py # Build targets *.7 *.7.xml *.7.html *.8 *.8.xml *.8.html attrd/attrd doc/*/en-US/images/*.png doc/*/tmp/** doc/*/publish cib/cib cib/cibmon cib/cibpipe crmd/atest crmd/crmd doc/api/* doc/Clusters_from_Scratch.txt doc/Pacemaker_Explained.txt doc/acls.html doc/crm_fencing.html doc/publican-catalog* fencing/stonith-test fencing/stonith_admin fencing/stonithd fencing/stonithd.xml lrmd/lrmd lrmd/lrmd_internal_ctl lrmd/lrmd_test mcp/pacemakerd pengine/pengine pengine/pengine.xml pengine/ptest scratch tools/attrd_updater tools/cibadmin tools/crm_attribute tools/crm_diff tools/crm_mon tools/crm_node tools/crm_resource tools/crm_shadow tools/crm_simulate tools/crm_verify tools/crmadmin tools/iso8601 tools/crm_ticket tools/report.collector.1 xml/crm.dtd xml/pacemaker*.rng xml/versions.rng doc/shared/en-US/*.xml doc/Clusters_from_Scratch.build doc/Clusters_from_Scratch/en-US/Ap-*.xml doc/Clusters_from_Scratch/en-US/Ch-*.xml +doc/Pacemaker_Administration.build +doc/Pacemaker_Administration/en-US/Ch-*.xml doc/Pacemaker_Development.build doc/Pacemaker_Development/en-US/Ch-*.xml doc/Pacemaker_Explained.build doc/Pacemaker_Explained/en-US/Ch-*.xml doc/Pacemaker_Explained/en-US/Ap-*.xml doc/Pacemaker_Remote.build doc/Pacemaker_Remote/en-US/Ch-*.xml lib/gnu/libgnu.a lib/gnu/stdalign.h *.coverity #Other mock HTML pacemaker*.spec pengine/.regression.failed.diff coverity-* compat_reports .ABI-build abi_dumps logs *.patch *.diff *.sed *.orig *.rej *.swp pengine/test10/shadow.* diff --git a/.travis.yml b/.travis.yml index bfe4bc090b..27d07a5731 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,74 +1,89 @@ # Control file for the Travis autobuilder # https://docs.travis-ci.com/user/customizing-the-build/ language: c compiler: - gcc - clang # - cov-build env: global: # -- BEGIN Coverity Scan ENV # Used by https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh # The build command with all of the arguments that you would apply to a manual `cov-build` - COVERITY_SCAN_BUILD_COMMAND="make" # Email address for notifications related to this build - OWNER_EMAIL="andrew@beekhof.net" # Regular expression selects on which branches to run analysis # Be aware of quotas. 
Do not run on every branch/commit - COVERITY_SCAN_BRANCH_PATTERN="1.1" # COVERITY_SCAN_TOKEN via "travis encrypt" using the repo's public key - secure: "qnrF7L8RejLUY7URdNe7XP4Hu4R55C0tvAuMRg4EjVtelOpw+nIgA7BLiX19q/70VjFuKcGnMhW28TdYl0uwMMdWKKxmwTim04Sy3UfOE2BPeuQOBphr+8s9gd0U1MO8j2dZ84A40t5Mkk946wWZwT0okpjOr/PfBOZkU3o87FM=" # -- END Coverity Scan ENV # sudo add-apt-repository ppa:hotot-team before_install: - sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main" - sudo apt-get update -qq +# To switch to Travis-CI's containerized (non-sudo) architecture, +# all our dependencies need to be on Travis's whitelist: +# https://github.com/travis-ci/apt-package-whitelist +# +# The only ones that aren't already are: +# - cluster-glue-dev: see open issue: +# https://github.com/travis-ci/apt-package-whitelist/issues/2936 +# - resource-agents: see open issue: +# https://github.com/travis-ci/apt-package-whitelist/issues/4261 +# - libdbus-1-dev: see multiple open issues: +# https://github.com/travis-ci/apt-package-whitelist/issues?utf8=%E2%9C%93&q=is%3Aissue+libdbus+-1-dev +# (a workaround is to install libdbus-glib-1-dev, which depends on it and is whitelisted) install: - - sudo apt-get install -qq automake autoconf chrpath libglib2.0-dev perl net-tools python libtool libxml2-dev bison flex uuid-dev libbz2-dev zlib1g-dev libltdl3-dev libgnutls-dev python-dev libpam0g-dev libncurses5-dev libcorosync-dev libxslt1-dev libdbus-1-dev + - sudo apt-get install -qq automake autoconf chrpath libtool perl python python-dev + - sudo apt-get install -qq libbz2-dev libdbus-1-dev libglib2.0-dev libgnutls-dev libltdl-dev + - sudo apt-get install -qq libncurses5-dev libpam0g-dev libxml2-dev libxslt1-dev uuid-dev + - sudo apt-get install -qq libqb-dev libcfg-dev libcmap-dev libcorosync-common-dev libcpg-dev + - sudo apt-get install -qq libquorum-dev libsam-dev libtotem-pg-dev libvotequorum-dev - sudo apt-get install -qq cluster-glue-dev resource-agents - - sudo apt-get install -qq libqb-dev before_script: # Save and restore CC so that ./configure can pass - export CC_SAVED=$CC - export CC=`echo ${CC} | sed s/cov-build/gcc/` - ./autogen.sh - ./configure - export CC=$CC_SAVED script: - if test ${CC} != cov-build; then sudo make install-exec-local || true; fi - if test ${CC} != cov-build; then make && ./BasicSanity.sh -V; fi - if test ${CC} = cov-build; then export CC=gcc; bash ./travisci_build_coverity_scan.sh; fi #after_script: #after_success: after_failure: - lsb_release -a - sudo cat /etc/apt/sources.list - whoami - env | sort - cat include/config.h notifications: irc: "irc.freenode.org#pcmk" # email: # recipients: # - developers@clusterlabs.org # whitelist branches: only: - master - "1.1" - "2.0" diff --git a/INSTALL.md b/INSTALL.md index bac7f8c536..852fbf4583 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,56 +1,53 @@ # How to Install Pacemaker ## Build Dependencies * automake 1.11 or later * autoconf 2.64 or later * libtool * libtool-ltdl-devel * libuuid-devel * pkgconfig * python (or python-devel if that's preferred as a build dependency) * glib2-devel 2.16.0 or later * libxml2-devel * libxslt-devel * bzip2-devel * gnutls-devel * pam-devel * libqb-devel ## Cluster Stack Dependencies (Pick at least one) -* clusterlib-devel (CMAN) -* corosynclib-devel (Corosync) +* Corosync: corosynclib-devel +* (no other stacks are currently supported) ## Optional Build Dependencies * ncurses-devel (interactive crm_mon) * systemd-devel (systemd support) * dbus-devel (systemd/upstart resource support) * 
cluster-glue-libs-devel (Linux-HA style fencing agents) * asciidoc (documentation) * help2man (documentation) * publican (documentation) * inkscape (documentation) * docbook-style-xsl (documentation) -* bison (rpm) -* byacc (rpm) -* flex (rpm) ## Optional testing dependencies * valgrind (if running CTS valgrind tests) * systemd-python (if using CTS on cluster nodes running systemd) * rsync (if running CTS container tests) * libvirt-daemon-driver-lxc (if running CTS container tests) * libvirt-daemon-lxc (if running CTS container tests) * libvirt-login-shell (if running CTS container tests) ## Source Control (GIT) git clone git://github.com/ClusterLabs/pacemaker.git [See Github](https://github.com/ClusterLabs/pacemaker) ## Installing from source $ ./autogen.sh $ ./configure $ make $ sudo make install diff --git a/Makefile.am b/Makefile.am index 3bf415d54d..584d5df9e4 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,83 +1,81 @@ # # Pacemaker code # # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # -EXTRA_DIST = autogen.sh ConfigureMe README.in m4/gnulib-cache.m4 +EXTRA_DIST = autogen.sh m4/gnulib-cache.m4 MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \ DRF/stamp-h.in libtool.m4 ltdl.m4 CORE = replace include lib mcp attrd pengine cib crmd fencing lrmd tools xml SUBDIRS = $(CORE) extra doc AM_CPPFLAGS = -I$(top_srcdir)/include doc_DATA = README.markdown COPYING -ACLOCAL_AMFLAGS = -I m4 - licensedir = $(docdir)/licenses/ license_DATA = $(wildcard licenses/*) # Test components SUBDIRS += cts testdir = $(datadir)/$(PACKAGE)/tests/ test_SCRIPTS = coverage.sh BasicSanity.sh test_DATA = valgrind-pcmk.suppressions # Scratch file for ad-hoc testing noinst_PROGRAMS = scratch nodist_scratch_SOURCES = scratch.c scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la -lm scratch.c: echo 'int main(void){}' >$@ core: @echo "Building only core components: $(CORE)" list='$(CORE)'; for subdir in $$list; do echo "Building $$subdir"; $(MAKE) -C $$subdir all || exit 1; done core-install: @echo "Installing only core components: $(CORE)" list='$(CORE)'; for subdir in $$list; do echo "Installing $$subdir"; $(MAKE) -C $$subdir install || exit 1; done core-clean: @echo "Cleaning only core components: $(CORE)" list='$(CORE)'; for subdir in $$list; do echo "Cleaning $$subdir"; $(MAKE) -C $$subdir clean || exit 1; done install-exec-local: $(INSTALL) -d $(DESTDIR)/$(LCRSODIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CONFIG_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CORE_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_BLACKBOX_DIR) $(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_LOG_DIR) $(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_BUNDLE_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CONFIG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CORE_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) 
$(DESTDIR)/$(CRM_BLACKBOX_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_LOG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_BUNDLE_DIR) # Use chown because the user/group may not exist clean-generic: rm -f $(TARFILE) *.tar.bz2 *.sed dist-clean-local: rm -f autoconf automake autoheader diff --git a/cib/main.c b/cib/main.c index 0530b014f3..3e76983544 100644 --- a/cib/main.c +++ b/cib/main.c @@ -1,418 +1,418 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #if HAVE_LIBXML2 # include #endif #ifdef HAVE_GETOPT_H # include #endif #if HAVE_BZLIB_H # include #endif extern int init_remote_listener(int port, gboolean encrypted); gboolean cib_shutdown_flag = FALSE; int cib_status = pcmk_ok; crm_cluster_t crm_cluster; GMainLoop *mainloop = NULL; const char *cib_root = NULL; char *cib_our_uname = NULL; gboolean preserve_status = FALSE; /* volatile because it may be changed in a signal handler */ volatile gboolean cib_writes_enabled = TRUE; int remote_fd = 0; int remote_tls_fd = 0; int cib_init(void); void cib_shutdown(int nsig); static bool startCib(const char *filename); extern int write_cib_contents(gpointer p); GHashTable *config_hash = NULL; GHashTable *local_notify_queue = NULL; char *channel1 = NULL; char *channel2 = NULL; char *channel3 = NULL; char *channel4 = NULL; char *channel5 = NULL; #define OPTARGS "maswr:V?" 
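
A note on the initgroups() fix in main() below: initgroups(3) takes a user *name* plus a base GID, so the old call passing CRM_DAEMON_GROUP asked the C library to look up a group name in the user database; the hunk corrects it to CRM_DAEMON_USER. The ordering in the surrounding code is also the conventional one: group changes must happen while the process still has root, with setuid() last. A minimal sketch of that pattern, using a hypothetical drop_privileges() helper (illustrative, not Pacemaker API):

    #include <sys/types.h>
    #include <pwd.h>
    #include <grp.h>
    #include <unistd.h>

    /* Hypothetical helper: permanently drop root to "user".
     * Order matters: setgid()/initgroups() require root privileges,
     * so setuid() must come last. */
    static int
    drop_privileges(const char *user)
    {
        struct passwd *pw = getpwnam(user);

        if (pw == NULL) {
            return -1;    /* unknown account */
        }
        if (setgid(pw->pw_gid) < 0) {
            return -1;    /* primary group */
        }
        if (initgroups(user, pw->pw_gid) < 0) {
            return -1;    /* supplementary groups: takes the USER name */
        }
        if (setuid(pw->pw_uid) < 0) {
            return -1;    /* finally give up root */
        }
        return 0;
    }
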
void cib_cleanup(void); static void cib_enable_writes(int nsig) { crm_info("(Re)enabling disk writes"); cib_writes_enabled = TRUE; } static void log_cib_client(gpointer key, gpointer value, gpointer user_data) { crm_info("Client %s", crm_client_name(value)); } /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"per-action-cib", 0, 0, 'a', "\tAdvanced use only"}, {"stand-alone", 0, 0, 's', "\tAdvanced use only"}, {"disk-writes", 0, 0, 'w', "\tAdvanced use only"}, {"cib-root", 1, 0, 'r', "\tAdvanced use only"}, {0, 0, 0, 0} }; /* *INDENT-ON* */ int main(int argc, char **argv) { int flag; int rc = 0; int index = 0; int argerr = 0; struct passwd *pwentry = NULL; crm_log_preinit(NULL, argc, argv); crm_set_options(NULL, "[options]", long_options, "Daemon for storing and replicating the cluster configuration"); crm_peer_init(); mainloop_add_signal(SIGTERM, cib_shutdown); mainloop_add_signal(SIGPIPE, cib_enable_writes); cib_writer = mainloop_add_trigger(G_PRIORITY_LOW, write_cib_contents, NULL); while (1) { flag = crm_get_option(argc, argv, &index); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 's': stand_alone = TRUE; preserve_status = TRUE; cib_writes_enabled = FALSE; pwentry = getpwnam(CRM_DAEMON_USER); CRM_CHECK(pwentry != NULL, crm_perror(LOG_ERR, "Invalid uid (%s) specified", CRM_DAEMON_USER); return CRM_EX_FATAL); rc = setgid(pwentry->pw_gid); if (rc < 0) { crm_perror(LOG_ERR, "Could not set group to %d", pwentry->pw_gid); return CRM_EX_FATAL; } - rc = initgroups(CRM_DAEMON_GROUP, pwentry->pw_gid); + rc = initgroups(CRM_DAEMON_USER, pwentry->pw_gid); if (rc < 0) { crm_perror(LOG_ERR, "Could not setup groups for user %d", pwentry->pw_uid); return CRM_EX_FATAL; } rc = setuid(pwentry->pw_uid); if (rc < 0) { crm_perror(LOG_ERR, "Could not set user to %d", pwentry->pw_uid); return CRM_EX_FATAL; } break; case '?': /* Help message */ crm_help(flag, CRM_EX_OK); break; case 'w': cib_writes_enabled = TRUE; break; case 'r': cib_root = optarg; break; case 'm': cib_metadata(); return CRM_EX_OK; default: ++argerr; break; } } if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) { cib_metadata(); return CRM_EX_OK; } if (optind > argc) { ++argerr; } if (argerr) { crm_help('?', CRM_EX_USAGE); } crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); if (cib_root == NULL) { cib_root = CRM_CONFIG_DIR; } else { crm_notice("Using custom config location: %s", cib_root); } if (crm_is_writable(cib_root, NULL, CRM_DAEMON_USER, CRM_DAEMON_GROUP, FALSE) == FALSE) { crm_err("Bad permissions on %s. Terminating", cib_root); fprintf(stderr, "ERROR: Bad permissions on %s. 
See logs for details\n", cib_root); fflush(stderr); return CRM_EX_FATAL; } /* read local config file */ cib_init(); // This should not be reachable CRM_CHECK(crm_hash_table_size(client_connections) == 0, crm_warn("Not all clients gone at exit")); g_hash_table_foreach(client_connections, log_cib_client, NULL); cib_cleanup(); crm_info("Done"); return CRM_EX_OK; } void cib_cleanup(void) { crm_peer_destroy(); if (local_notify_queue) { g_hash_table_destroy(local_notify_queue); } crm_client_cleanup(); g_hash_table_destroy(config_hash); free(cib_our_uname); free(channel1); free(channel2); free(channel3); free(channel4); free(channel5); } unsigned long cib_num_ops = 0; const char *cib_stat_interval = "10min"; unsigned long cib_num_local = 0, cib_num_updates = 0, cib_num_fail = 0; unsigned long cib_bad_connects = 0, cib_num_timeouts = 0; #if SUPPORT_COROSYNC static void cib_cs_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { xml = string2xml(data); if (xml == NULL) { crm_err("Invalid XML: '%.120s'", data); free(data); return; } crm_xml_add(xml, F_ORIG, from); /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */ cib_peer_callback(xml, NULL); } free_xml(xml); free(data); } static void cib_cs_destroy(gpointer user_data) { if (cib_shutdown_flag) { crm_info("Corosync disconnection complete"); } else { crm_err("Corosync connection lost! Exiting."); terminate_cib(__FUNCTION__, CRM_EX_DISCONNECT); } } #endif static void cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) { switch (type) { case crm_status_processes: if (cib_legacy_mode() && is_not_set(node->processes, crm_get_cluster_proc())) { uint32_t old = data? *(const uint32_t *)data : 0; if ((node->processes ^ old) & crm_proc_cpg) { crm_info("Attempting to disable legacy mode after %s left the cluster", node->uname); legacy_mode = FALSE; } } break; case crm_status_uname: case crm_status_rstate: case crm_status_nstate: if (cib_shutdown_flag && (crm_active_peers() < 2) && crm_hash_table_size(client_connections) == 0) { crm_info("No more peers"); terminate_cib(__FUNCTION__, -1); } break; } } int cib_init(void) { if (is_corosync_cluster()) { #if SUPPORT_COROSYNC crm_cluster.destroy = cib_cs_destroy; crm_cluster.cpg.cpg_deliver_fn = cib_cs_dispatch; crm_cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership; #endif } config_hash = crm_str_table_new(); if (startCib("cib.xml") == FALSE) { crm_crit("Cannot start CIB... terminating"); crm_exit(CRM_EX_NOINPUT); } if (stand_alone == FALSE) { if (is_corosync_cluster()) { crm_set_status_callback(&cib_peer_update_callback); } if (crm_cluster_connect(&crm_cluster) == FALSE) { crm_crit("Cannot sign in to the cluster... terminating"); crm_exit(CRM_EX_FATAL); } cib_our_uname = crm_cluster.uname; } else { cib_our_uname = strdup("localhost"); } cib_ipc_servers_init(&ipcs_ro, &ipcs_rw, &ipcs_shm, &ipc_ro_callbacks, &ipc_rw_callbacks); if (stand_alone) { cib_is_master = TRUE; } /* Create the mainloop and run it... */ mainloop = g_main_new(FALSE); crm_info("Starting %s mainloop", crm_system_name); g_main_run(mainloop); /* If main loop returned, clean up and exit. We disconnect in case * terminate_cib() was called with fast=-1. 
*/ crm_cluster_disconnect(&crm_cluster); cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm); return crm_exit(CRM_EX_OK); } static bool startCib(const char *filename) { gboolean active = FALSE; xmlNode *cib = readCibXmlFile(cib_root, filename, !preserve_status); if (activateCibXml(cib, TRUE, "start") == 0) { int port = 0; const char *port_s = NULL; active = TRUE; cib_read_config(config_hash, cib); port_s = crm_element_value(cib, "remote-tls-port"); if (port_s) { port = crm_parse_int(port_s, "0"); remote_tls_fd = init_remote_listener(port, TRUE); } port_s = crm_element_value(cib, "remote-clear-port"); if (port_s) { port = crm_parse_int(port_s, "0"); remote_fd = init_remote_listener(port, FALSE); } crm_info("CIB Initialization completed successfully"); } return active; } diff --git a/configure.ac b/configure.ac index 478b1f8d77..772edc4681 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1729 +1,1745 @@ dnl dnl autoconf for Pacemaker dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) +AC_CONFIG_MACRO_DIR([m4]) + dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/crm_config.h) AC_ARG_WITH(version, [ --with-version=version Override package version (if you are a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you are a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) AM_INIT_AUTOMAKE([1.11 foreign silent-rules]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", [Current pacemaker version]) dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". 
AM_PROG_CC_C_O AC_PROG_CC_STDC gl_EARLY gl_INIT LT_INIT([dlopen]) LTDL_INIT([convenience]) AC_PROG_YACC AM_PROG_LEX AC_C_STRINGIZE AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)], [RC=1; AC_MSG_RESULT(no)]) return $RC } dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi Force GCC to compile to ANSI standard for older compilers. [default=no] ]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings Enable pedantic and fatal warnings for gcc [default=yes] ]) AC_ARG_ENABLE([quiet], [ --enable-quiet Suppress make output unless there is an error [default=no] ]) AC_ARG_ENABLE([no-stack], [ --enable-no-stack Build only the Policy Engine and its requirements [default=no] ]) AC_ARG_ENABLE([upstart], [ --enable-upstart Enable support for managing resources via Upstart [default=try] ], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [ --enable-systemd Enable support for managing resources via systemd [default=try]], [], [enable_systemd=try], ) AC_ARG_ENABLE(hardening, [ --with-hardening Harden the resulting executables/libraries (best effort by default) ], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) AC_ARG_WITH(corosync, [ --with-corosync Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH(nagios, [ --with-nagios Support nagios remote monitoring ], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) AC_ARG_WITH(nagios-plugin-dir, [ --with-nagios-plugin-dir=DIR Directory for nagios plugins [${NAGIOS_PLUGIN_DIR}] ], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH(nagios-metadata-dir, [ --with-nagios-metadata-dir=DIR Directory for nagios plugins metadata [${NAGIOS_METADATA_DIR}] ], [ NAGIOS_METADATA_DIR="$withval" ] ) AC_ARG_WITH(acl, [ --with-acl Support CIB ACL ], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) AC_ARG_WITH(cibsecrets, [ --with-cibsecrets Support separate file for CIB secrets ], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR Directory for init (rc) scripts [${INITDIR}] ], [ INITDIR="$withval" ]) SUPPORT_PROFILING=0 AC_ARG_WITH(profiling, [ --with-profiling Disable optimizations for effective profiling ], [ SUPPORT_PROFILING=$withval ]) AC_ARG_WITH(coverage, [ --with-coverage Disable optimizations for effective profiling ], [ SUPPORT_COVERAGE=$withval ]) PUBLICAN_BRAND="common" AC_ARG_WITH(brand, [ --with-brand=brand Brand to use for generated documentation (set empty for no docs) [$PUBLICAN_BRAND] ], [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ]) AC_SUBST(PUBLICAN_BRAND) CONFIGDIR="" AC_ARG_WITH(configdir, [ --with-configdir=DIR Directory for Pacemaker configuration file [${CONFIGDIR}]], [ CONFIGDIR="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH(logdir, [ --with-logdir=DIR Directory for Pacemaker log file 
@<:@LOCALSTATEDIR/log/pacemaker@:>@ ], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH(bundledir, [ --with-bundledir=DIR Directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@ ], [ CRM_BUNDLE_DIR="$withval" ] ) dnl =============================================== dnl General Processing dnl =============================================== INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) if test x"${CRM_LOG_DIR}" = x""; then CRM_LOG_DIR="${localstatedir}/log/pacemaker" fi AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) if test x"${CRM_BUNDLE_DIR}" = x""; then CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles" fi AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
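
A note on the uint64_t format probe a little further below: configure compiles a test program under -Wall -Werror to decide whether "%lu" or "%llu" matches uint64_t on the host, then hardcodes the winner as U64T. For comparison only (not what this script does), C99's <inttypes.h> answers the same question per platform without a configure-time probe:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bignum = 42;

        /* PRIu64 expands to the correct conversion specifier for this
         * platform -- the decision the U64T probe makes at configure time. */
        printf("Result: %" PRIu64 "\n", bignum);
        return 0;
    }
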
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac AC_MSG_CHECKING(which format is needed to print uint64_t) ac_save_CFLAGS=$CFLAGS CFLAGS="-Wall -Werror" AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [ #include #include #include ], [ int max = 512; uint64_t bignum = 42; char *buffer = malloc(max); const char *random = "random"; snprintf(buffer, max-1, "", bignum, random); fprintf(stderr, "Result: %s\n", buffer); ] )], [U64T="%lu"], [U64T="%llu"] ) CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) -AM_PATH_PYTHON +dnl Pacemaker's executable python scripts will invoke the python specified by +dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a +dnl built-in list with (unversioned) "python" having precedence. 
To configure +dnl Pacemaker to use a specific python interpreter version, define PYTHON +dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 +case "x$PYTHON" in + x*python3*) + dnl When used with Python 3, Pacemaker requires a minimum of 3.2 + AM_PATH_PYTHON([3.2]) + ;; + *) + dnl Otherwise, Pacemaker requires a minimum of 2.7 + AM_PATH_PYTHON([2.7]) + ;; +esac + AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(HTML2TXT, lynx w3m) AC_PATH_PROGS(HELP2MAN, help2man) AC_PATH_PROGS(POD2MAN, pod2man, pod2man) AC_PATH_PROGS(ASCIIDOC, asciidoc) AC_PATH_PROGS(PUBLICAN, publican) AC_PATH_PROGS(INKSCAPE, inkscape) AC_PATH_PROGS(XSLTPROC, xsltproc) AC_PATH_PROGS(XMLCATALOG, xmlcatalog) AC_PATH_PROGS(FOP, fop) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) PKG_PROG_PKG_CONFIG AC_PATH_PROGS(XML2CONFIG, xml2-config) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) -dnl Disable these until we decide if the stonith config file should be supported -dnl AC_PATH_PROGS(BISON, bison) -dnl AC_PATH_PROGS(FLEX, flex) -dnl AC_PATH_PROGS(HAVE_YACC, $YACC) - if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"") if test x"${ASCIIDOC}" != x""; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi publican_intree_brand=no if test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""; then dnl special handling for clusterlabs brand (possibly in-tree version used) test "${PUBLICAN_BRAND}" != "clusterlabs" \ || test -d /usr/share/publican/Common_Content/clusterlabs if test $? -ne 0; then dnl Unknown option: brand_dir vs. 
Option brand_dir requires an argument if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common]) PUBLICAN_BRAND=common else publican_intree_brand=yes fi fi AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand]) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi AM_CONDITIONAL([BUILD_DOCBOOK], [test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""]) AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], [test x"${publican_intree_brand}" = x"yes"]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...). dnl Fortunately, a replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_FUNCS([sched_setscheduler]) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi if test x"${PKG_CONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi if $PKG_CONFIG --exists glib-2.0 then GLIBCONFIG="$PKG_CONFIG glib-2.0" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKG_CONFIG --exists glib-2.0; echo $? $PKG_CONFIG --cflags glib-2.0; echo $? $PKG_CONFIG glib-2.0; echo $? set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG)
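
A note on the AC_REPLACE_FUNCS list earlier in this hunk: on platforms missing GNU extensions such as strnlen() and strndup(), the build links in home-grown fallbacks. Purely as a sketch of what such a "tolerably-fast" replacement might look like (this is not the code in Pacemaker's replace directory):

    #include <stddef.h>

    /* Fallback strnlen(): length of s, but never scan past maxlen bytes. */
    size_t
    strnlen(const char *s, size_t maxlen)
    {
        size_t n = 0;

        while (n < maxlen && s[n] != '\0') {
            n++;
        }
        return n;
    }
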
# # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl FreeBSD needs -lcompat for ftime() used by lrmd.c AC_CHECK_LIB([compat], [ftime], [COMPAT_LIBS='-lcompat']) AC_SUBST(COMPAT_LIBS) dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/swab.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/reboot.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/signalfd.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/timeb.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) dnl These headers need prerequisites before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) AC_MSG_CHECKING(for special libxml2 includes) if test "x$XML2CONFIG" = "x"; then AC_MSG_ERROR(libxml2 config not found) else XML2HEAD="`$XML2CONFIG --cflags`" AC_MSG_RESULT($XML2HEAD) AC_CHECK_LIB(xml2, xmlReadMemory) AC_CHECK_LIB(xslt, xsltApplyStylesheet) fi CPPFLAGS="$CPPFLAGS $XML2HEAD" AC_CHECK_HEADERS(libxml/xpath.h) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(The libxml development headers were not found) fi if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(The libxslt development headers were not found) fi AC_CACHE_CHECK(whether __progname and __progname_full are available, pf_cv_var_progname, AC_TRY_LINK([extern char *__progname, *__progname_full;], [__progname = "foo"; __progname_full = "foo bar";], pf_cv_var_progname="yes", pf_cv_var_progname="no")) if test "$pf_cv_var_progname" = "yes"; then AC_DEFINE(HAVE___PROGNAME,1,[ ]) fi dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]]) AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include ]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include <dirent.h>])
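
A note on the struct dirent.d_type probe just above: d_type is a BSD/glibc extension and, even where it exists, some filesystems report DT_UNKNOWN, so callers need a stat()-based fallback. An illustrative pattern keyed off the HAVE_STRUCT_DIRENT_D_TYPE define (not taken from the Pacemaker sources):

    #include <dirent.h>
    #include <sys/stat.h>
    #include <stdio.h>

    /* Nonzero if the entry under "parent" is a directory, preferring
     * d_type when configure found it, falling back to lstat(). */
    static int
    entry_is_dir(const char *parent, const struct dirent *entry)
    {
    #ifdef HAVE_STRUCT_DIRENT_D_TYPE
        if (entry->d_type != DT_UNKNOWN) {
            return entry->d_type == DT_DIR;
        }
    #endif
        {
            char path[4096];
            struct stat sb;

            snprintf(path, sizeof(path), "%s/%s", parent, entry->d_name);
            return (lstat(path, &sb) == 0) && S_ISDIR(sb.st_mode);
        }
    }
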
dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(g_log_set_default_handler) AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_TRY_COMPILE([#include <signal.h>],[sighandler_t *f;], has_sighandler_t=yes,has_sighandler_t=no) AC_MSG_RESULT($has_sighandler_t) if test "$has_sighandler_t" = "yes" ; then AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurses takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" ac_save_CFLAGS=$CFLAGS CFLAGS="-Wcast-qual -Werror" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [ac_cv_compatible_printw=yes], [ac_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT([$ac_cv_compatible_printw]) if test "$ac_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== if test x${enable_no_stack} = xyes; then SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 0.14.0+ (2012-06) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" dnl libqb 0.17.0+ (2014-02) AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS 
-I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS(stonith/stonith.h) if test "$ac_cv_header_stonith_stonith_h" = "yes"; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) CRM_DAEMON_USER="hacluster" AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) CRM_DAEMON_GROUP="haclient" AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_STATE_DIR=${localstatedir}/run/crm AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets) AC_SUBST(CRM_STATE_DIR) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) HA_STATE_DIR="${localstatedir}/run" AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where sbd keeps its PID file) AC_SUBST(HA_STATE_DIR) CRM_RSCTMP_DIR="${localstatedir}/run/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) OCF_ROOT_DIR="/usr/lib/ocf" if test "X$OCF_ROOT_DIR" = X; then AC_MSG_ERROR(Could not locate OCF directory) fi AC_SUBST(OCF_ROOT_DIR) OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) RH_STONITH_DIR="$sbindir" AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries) RH_STONITH_PREFIX="fence_" AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) 
else # The current directory name makes a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 HAVE_upstart=0 HAVE_systemd=0 PKG_CHECK_MODULES(DBUS, dbus-1, ,HAVE_dbus=0) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) if test $HAVE_dbus = 1; then CFLAGS="$CFLAGS `$PKG_CONFIG --cflags dbus-1`" fi DBUS_LIBS="$CFLAGS `$PKG_CONFIG --libs dbus-1`" AC_SUBST(DBUS_LIBS) AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]]) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without DBus]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then AC_MSG_CHECKING([for systemd version query result via dbus-send]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) if test "x${enable_systemd}" = xyes; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" AC_MSG_CHECKING([for systemd path for system unit files]) systemdunitdir="${systemdunitdir-}" PKG_CHECK_VAR([systemdunitdir], [systemd], [systemdsystemunitdir], [], [systemdunitdir=no]) AC_MSG_RESULT([${systemdunitdir}]) if test "x${systemdunitdir}" = xno; then AC_MSG_FAILURE([cannot enable systemd when systemdunitdir unresolved]) fi fi AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then AC_MSG_FAILURE([cannot enable Upstart without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then AC_MSG_CHECKING([for Upstart version query result via dbus-send]) ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) 
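
A note on the dbus-send probes above: configure only asks the managers for a version string to see whether systemd or Upstart is actually answering on the system bus. The equivalent query in C against libdbus, as a sketch (assumes org.freedesktop.systemd1 is present; error handling trimmed):

    #include <dbus/dbus.h>
    #include <stdio.h>

    int main(void)
    {
        DBusError err;
        DBusConnection *bus;
        DBusMessage *msg, *reply;
        const char *iface = "org.freedesktop.systemd1.Manager";
        const char *prop = "Version";

        dbus_error_init(&err);
        bus = dbus_bus_get(DBUS_BUS_SYSTEM, &err);
        if (bus == NULL) {
            fprintf(stderr, "no system bus: %s\n", err.message);
            return 1;
        }

        /* Same call dbus-send makes: Properties.Get on the manager object */
        msg = dbus_message_new_method_call("org.freedesktop.systemd1",
                                           "/org/freedesktop/systemd1",
                                           "org.freedesktop.DBus.Properties",
                                           "Get");
        dbus_message_append_args(msg, DBUS_TYPE_STRING, &iface,
                                 DBUS_TYPE_STRING, &prop, DBUS_TYPE_INVALID);
        reply = dbus_connection_send_with_reply_and_block(bus, msg, 2000, &err);
        if (reply != NULL) {
            DBusMessageIter it, var;

            dbus_message_iter_init(reply, &it);    /* payload is a variant */
            dbus_message_iter_recurse(&it, &var);
            if (dbus_message_iter_get_arg_type(&var) == DBUS_TYPE_STRING) {
                const char *version = NULL;

                dbus_message_iter_get_basic(&var, &version);
                printf("systemd version: %s\n", version);
            }
            dbus_message_unref(reply);
        }
        dbus_message_unref(msg);
        return 0;
    }
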
AC_SUBST(SUPPORT_UPSTART) case $SUPPORT_NAGIOS in 1|yes|true|try) SUPPORT_NAGIOS=1 ;; *) SUPPORT_NAGIOS=0 ;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_CS=no ;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS) SUPPORT_CS=1 PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS $cmap_CFLAGS $quorum_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" AC_CHECK_LIB(corosync_common, cs_strerror) STACKS="$STACKS corosync-native" fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AC_SUBST(SUPPORT_COROSYNC) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported. Just building the Policy Engine) PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_ACL=no ;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) SUPPORT_ACL=1 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. 
You need to use libqb > 0.13.0)
        fi
    fi
fi

if test $SUPPORT_ACL = 1; then
    PCMK_FEATURES="$PCMK_FEATURES acls"
fi
AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL)

dnl ========================================================================
dnl CIB secrets
dnl ========================================================================

case $SUPPORT_CIBSECRETS in
    1|yes|true|try) SUPPORT_CIBSECRETS=1 ;;
    *) SUPPORT_CIBSECRETS=0 ;;
esac

AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets)
AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1)
if test $SUPPORT_CIBSECRETS = 1; then
    PCMK_FEATURES="$PCMK_FEATURES cibsecrets"
    LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
    AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets)
    AC_SUBST(LRM_CIBSECRETS_DIR)
fi

dnl ========================================================================
dnl GnuTLS
dnl ========================================================================

AC_CHECK_HEADERS(gnutls/gnutls.h)
AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)

dnl GnuTLS library: Attempt to determine flags via the 'libgnutls-config'
dnl program. If no 'libgnutls-config', try traditional autoconf means.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
    AC_MSG_CHECKING(for gnutls header flags)
    GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
    AC_MSG_RESULT($GNUTLSHEAD)
    AC_MSG_CHECKING(for gnutls library flags)
    GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
    AC_MSG_RESULT($GNUTLSLIBS)
fi
AC_CHECK_LIB(gnutls, gnutls_init)
AC_CHECK_FUNCS(gnutls_priority_set_direct)
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)

dnl ========================================================================
dnl System Health
dnl ========================================================================

dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if
    $PKG_CONFIG --exists $SERVICELOG
then
    PKG_CHECK_MODULES([SERVICELOG], [servicelog-1])
    SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")

dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if
    $PKG_CONFIG --exists $OPENIPMI $SERVICELOG
then
    PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
    OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")

dnl ========================================================================
dnl Compiler flags
dnl ========================================================================

dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to sub processes.
if export | fgrep " CFLAGS=" > /dev/null; then
    SAVED_CFLAGS="$CFLAGS"
    unset CFLAGS
    CFLAGS="$SAVED_CFLAGS"
    unset SAVED_CFLAGS
fi

AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])

CC_EXTRAS=""

if test "$GCC" != yes; then
    CFLAGS="$CFLAGS -g"
    enable_fatal_warnings=no
else
    CFLAGS="$CFLAGS -ggdb"

dnl When we don't have diagnostic push / pull, we can't explicitly disable
dnl checking for nonliteral formats in the places where they occur on purpose,
dnl so we disable nonliteral format checking globally since we abort on
dnl warnings.
dnl What makes this really ugly is that nonliteral format checking is
dnl available as a separate switch in recent gcc, whereas in older gcc it is
dnl part of -Wformat=2.
dnl So: if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral;
dnl if we lack push/pull but have -Wformat-nonliteral, we can enable
dnl -Wformat=2; otherwise neither.

    gcc_diagnostic_push_pull=no
    SAVED_CFLAGS="$CFLAGS"
    CFLAGS="$CFLAGS -Werror"
    AC_MSG_CHECKING([for gcc diagnostic push / pull])
    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#pragma GCC diagnostic push
#pragma GCC diagnostic pop
                      ]])],
                      [
                        AC_MSG_RESULT([yes])
                        gcc_diagnostic_push_pull=yes
                      ], AC_MSG_RESULT([no]))
    CFLAGS="$SAVED_CFLAGS"

    if cc_supports_flag "-Wformat-nonliteral"; then
        gcc_format_nonliteral=yes
    else
        gcc_format_nonliteral=no
    fi

    # We had to eliminate -Wnested-externs because of libtool changes
    # Make sure to order options so that the former stand for prerequisites
    # of the latter (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char" if test "x$gcc_diagnostic_push_pull" = "xyes"; then AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" else if test "x$gcc_format_nonliteral" = "xyes"; then EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" fi fi # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $CC_EXTRAS $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (attrd, cib, crmd, lrmd, stonithd, pacemakerd, pacemaker_remoted, dnl pengine) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. 
dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi if test "x${HARDENING}" = "xno"; then AC_MSG_NOTICE([Hardening: explicitly disabled]) elif test "x${HARDENING}" = "xyes" \ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then dnl We'll figure out on our own... CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1]) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1]) fi # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1]) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... 
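Whether the hardening flags negotiated above actually took effect can be spot-checked on an installed binary; a sketch (the binary path is an example, and the exact output varies by toolchain and distribution):

----
# PIE builds report as a shared object; RELRO and BIND_NOW appear in the
# program headers and dynamic section respectively
$ file /usr/libexec/pacemaker/pengine
/usr/libexec/pacemaker/pengine: ELF 64-bit LSB shared object ...
$ readelf -lW /usr/libexec/pacemaker/pengine | grep GNU_RELRO
  GNU_RELRO   ...
$ readelf -dW /usr/libexec/pacemaker/pengine | grep BIND_NOW
 0x0000000000000018 (BIND_NOW)
----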
AC_SUBST(LOCALE)

dnl Options for cleaning up the compiler output
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
if test "x${enable_quiet}" = "xyes"; then
    QUIET_LIBTOOL_OPTS="--quiet"
    QUIET_MAKE_OPTS="--quiet"
fi

AC_MSG_RESULT(Suppress make details: ${enable_quiet})

dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKE="${MAKE} \$(QUIET_MAKE_OPTS)"

AC_SUBST(CC)
AC_SUBST(MAKE)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_MAKE_OPTS)
AC_SUBST(QUIET_LIBTOOL_OPTS)

AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
AC_SUBST(PCMK_FEATURES)

dnl The Makefiles and shell scripts we output
AC_CONFIG_FILES(Makefile \
Doxyfile \
coverage.sh \
cts/Makefile \
+ cts/CTSlab.py \
cts/CTSvars.py \
cts/LSBDummy \
+ cts/OCFIPraTest.py \
cts/benchmark/Makefile \
cts/benchmark/clubench \
+ cts/fence_dummy \
cts/lxc_autogen.sh \
cib/Makefile \
attrd/Makefile \
crmd/Makefile \
pengine/Makefile \
pengine/regression.core.sh \
doc/Makefile \
doc/Clusters_from_Scratch/publican.cfg \
+ doc/Pacemaker_Administration/publican.cfg \
doc/Pacemaker_Development/publican.cfg \
doc/Pacemaker_Explained/publican.cfg \
doc/Pacemaker_Remote/publican.cfg \
+ fencing/Makefile \
+ fencing/regression.py \
include/Makefile \
include/crm/Makefile \
include/crm/cib/Makefile \
include/crm/common/Makefile \
include/crm/cluster/Makefile \
include/crm/fencing/Makefile \
include/crm/pengine/Makefile \
replace/Makefile \
lib/Makefile \
lib/pacemaker.pc \
lib/pacemaker-cib.pc \
lib/pacemaker-lrmd.pc \
lib/pacemaker-service.pc \
lib/pacemaker-pengine.pc \
lib/pacemaker-fencing.pc \
lib/pacemaker-cluster.pc \
lib/common/Makefile \
lib/cluster/Makefile \
lib/cib/Makefile \
lib/pengine/Makefile \
lib/transition/Makefile \
lib/fencing/Makefile \
lib/lrmd/Makefile \
lib/services/Makefile \
mcp/Makefile \
mcp/pacemaker \
mcp/pacemaker.service \
mcp/pacemaker.upstart \
mcp/pacemaker.combined.upstart \
- fencing/Makefile \
- fencing/regression.py \
lrmd/Makefile \
lrmd/regression.py \
lrmd/pacemaker_remote.service \
lrmd/pacemaker_remote \
extra/Makefile \
extra/alerts/Makefile \
extra/resources/Makefile \
extra/logrotate/Makefile \
extra/logrotate/pacemaker \
tools/Makefile \
tools/crm_report \
tools/report.common \
tools/cibsecret \
tools/crm_mon.service \
tools/crm_mon.upstart \
xml/Makefile \
lib/gnu/Makefile \
)

dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()

dnl *****************
dnl Configure summary
dnl *****************

AC_MSG_RESULT([])
AC_MSG_RESULT([$PACKAGE configuration:])
AC_MSG_RESULT([  Version                  = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_RESULT([  Features                 =${PCMK_FEATURES}])
AC_MSG_RESULT([])
AC_MSG_RESULT([  Prefix                   = ${prefix}])
AC_MSG_RESULT([  Executables              = ${sbindir}])
AC_MSG_RESULT([  Man pages                = ${mandir}])
AC_MSG_RESULT([  Libraries                = ${libdir}])
AC_MSG_RESULT([  Header files             = ${includedir}])
AC_MSG_RESULT([  Arch-independent files   = ${datadir}])
AC_MSG_RESULT([  State information        = ${localstatedir}])
AC_MSG_RESULT([  System configuration     = ${sysconfdir}])
AC_MSG_RESULT([])
AC_MSG_RESULT([  HA group name            = ${CRM_DAEMON_GROUP}])
AC_MSG_RESULT([  HA user name             = ${CRM_DAEMON_USER}])
AC_MSG_RESULT([])
AC_MSG_RESULT([  CFLAGS                   = ${CFLAGS}])
AC_MSG_RESULT([  CFLAGS_HARDENED_EXE      = ${CFLAGS_HARDENED_EXE}])
AC_MSG_RESULT([  CFLAGS_HARDENED_LIB      = ${CFLAGS_HARDENED_LIB}])
AC_MSG_RESULT([  LDFLAGS_HARDENED_EXE     = ${LDFLAGS_HARDENED_EXE}])
AC_MSG_RESULT([  LDFLAGS_HARDENED_LIB     = ${LDFLAGS_HARDENED_LIB}])
AC_MSG_RESULT([  Libraries                = ${LIBS}])
AC_MSG_RESULT([  Stack
Libraries = ${CLUSTERLIBS}]) diff --git a/cts/CTSlab.py b/cts/CTSlab.py.in similarity index 99% rename from cts/CTSlab.py rename to cts/CTSlab.py.in index 8465331483..321949e2a0 100755 --- a/cts/CTSlab.py +++ b/cts/CTSlab.py.in @@ -1,143 +1,143 @@ -#!/usr/bin/python +#!@PYTHON@ '''CTS: Cluster Testing System: Lab environment module ''' __copyright__ = ''' Copyright (C) 2001,2005 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. import sys, signal, os pdir = os.path.dirname(sys.path[0]) sys.path.insert(0, pdir) # So that things work from the source directory try: from cts.CTSvars import * from cts.CM_corosync import * from cts.CTSaudits import AuditList from cts.CTStests import TestList from cts.CTSscenarios import * from cts.logging import LogFactory except ImportError as e: sys.stderr.write("abort: %s\n" % e) sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" % ' '.join(sys.path)) sys.exit(1) # These are globals so they can be used by the signal handler. cm = None scenario = None LogFactory().add_stderr() def sig_handler(signum, frame) : LogFactory().log("Interrupted by signal %d"%signum) if scenario: scenario.summarize() if signum == 15 : if scenario: scenario.TearDown() sys.exit(1) if __name__ == '__main__': Environment = CtsLab(sys.argv[1:]) NumIter = Environment["iterations"] Tests = [] # Set the signal handler signal.signal(15, sig_handler) signal.signal(10, sig_handler) # Create the Cluster Manager object if Environment["Stack"] == "corosync 2.x": cm = crm_corosync(Environment) else: LogFactory().log("Unknown stack: "+Environment["stack"]) sys.exit(1) if Environment["TruncateLog"] == 1: if Environment["OutputFile"] is None: LogFactory().log("Ignoring truncate request because no output file specified") else: LogFactory().log("Truncating %s" % Environment["OutputFile"]) with open(Environment["OutputFile"], "w") as outputfile: outputfile.truncate(0) Audits = AuditList(cm) if Environment["ListTests"] == 1: Tests = TestList(cm, Audits) LogFactory().log("Total %d tests"%len(Tests)) for test in Tests : LogFactory().log(str(test.name)); sys.exit(0) elif len(Environment["tests"]) == 0: Tests = TestList(cm, Audits) else: Chosen = Environment["tests"] for TestCase in Chosen: match = None for test in TestList(cm, Audits): if test.name == TestCase: match = test if not match: LogFactory().log("--choose: No applicable/valid tests chosen") sys.exit(1) else: Tests.append(match) # Scenario selection if Environment["scenario"] == "basic-sanity": scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests) elif Environment["scenario"] == "all-once": NumIter = len(Tests) scenario = AllOnce( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) elif Environment["scenario"] == "sequence": scenario = Sequence( cm, [ BootCluster(Environment), 
PacketLoss(Environment) ], Audits, Tests) elif Environment["scenario"] == "boot": scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, []) else: scenario = RandomTests( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ") LogFactory().log("Stack: %s (%s)" % (Environment["Stack"], Environment["Name"])) LogFactory().log("Schema: %s" % Environment["Schema"]) LogFactory().log("Scenario: %s" % scenario.__doc__) LogFactory().log("CTS Master: %s" % Environment["cts-master"]) LogFactory().log("CTS Logfile: %s" % Environment["OutputFile"]) LogFactory().log("Random Seed: %s" % Environment["RandSeed"]) LogFactory().log("Syslog variant: %s" % Environment["syslogd"].strip()) LogFactory().log("System log files: %s" % Environment["LogFileName"]) if Environment.has_key("IPBase"): LogFactory().log("Base IP for resources: %s" % Environment["IPBase"]) LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"]) Environment.dump() rc = Environment.run(scenario, NumIter) sys.exit(rc) diff --git a/cts/HBDummy.in b/cts/HBDummy.in deleted file mode 100755 index f1cba94dc0..0000000000 --- a/cts/HBDummy.in +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/sh -# -# -# Dummy Heartbeat RA. Does nothing but touch and remove a state file -# -# Copyright (c) 2015 Lars Ellenberg -# -# Based on LSBDummy, -# Copyright (c) 2010 Andrew Beekhof -# All Rights Reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# -# Further, this software is distributed without any warranty that it is -# free of the rightful claim of any third person regarding infringement -# or the like. Any license provided herein, whether implied or -# otherwise, applies only to this software file. Patent licenses, if -# any, provided herein do not apply to combinations of this program with -# other software, or any other product whatsoever. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. -# - -####################################################################### -# Initialization: - -desc="Dummy Heartbeat service" -. @OCF_ROOT_DIR@/resource.d/heartbeat/.ocf-directories -: ${HA_VARRUN=/tmp} # Backup in case .ocf-directories doesn't exist - -####################################################################### - -success() -{ - echo -ne "[ OK ]\r" -} - -failure() -{ - echo -ne "[FAILED]\r" -} - -dummy_usage() { - cat < Copyright (C) 2004 International Business Machines Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. import string, sys, struct, os, random, time, syslog from cts.CTSvars import * def usage(): print("usage: " + sys.argv[0] \ + " [-2]"\ + " [--ipbase|-i first-test-ip]"\ + " [--ipnum|-n test-ip-num]"\ + " [--help|-h]"\ + " [--perform|-p op]"\ + " [number-of-iterations]") sys.exit(1) def perform_op(ra, ip, op): os.environ["OCF_RA_VERSION_MAJOR"] = "1" os.environ["OCF_RA_VERSION_MINOR"] = "0" os.environ["OCF_ROOT"] = CTSvars.OCF_ROOT_DIR os.environ["OCF_RESOURCE_INSTANCE"] = ip os.environ["OCF_RESOURCE_TYPE"] = ra os.environ["OCF_RESKEY_ip"] = ip os.environ["HA_LOGFILE"] = "/dev/null" os.environ["HA_LOGFACILITY"] = "local7" path = CTSvars.OCF_ROOT_DIR + "/resource.d/heartbeat/" + ra return os.spawnvpe(os.P_WAIT, path, [ra, op], os.environ) def audit(ra, iplist, ipstatus, summary): passed = 1 for ip in iplist: ret = perform_op(ra, ip, "monitor") if ret != ipstatus[ip]: passed = 0 log("audit: status of %s should be %d but it is %d\t [failure]" % (ip,ipstatus[ip],ret)) ipstatus[ip] = ret summary["audit"]["called"] += 1; if passed : summary["audit"]["success"] += 1 else : summary["audit"]["failure"] += 1 def log(towrite): t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time())) logstr = t + " "+str(towrite) syslog.syslog(logstr) print(logstr) if __name__ == '__main__': ra = "IPaddr" ipbase = "127.0.0.10" ipnum = 1 itnum = 50 perform = None summary = { "start":{"called":0,"success":0,"failure":0}, "stop" :{"called":0,"success":0,"failure":0}, "audit":{"called":0,"success":0,"failure":0} } syslog.openlog(sys.argv[0], 0, syslog.LOG_LOCAL7) # Process arguments... 
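Before the argument-processing loop below: the perform_op() helper above amounts to invoking an OCF agent directly with the standard OCF environment set up. A hand-run equivalent, assuming the usual /usr/lib/ocf for CTSvars.OCF_ROOT_DIR:

----
# Roughly perform_op("IPaddr", "127.0.0.10", "monitor"); rc 0 = running,
# rc 7 = stopped (see the OCF return codes later in this change)
$ OCF_RA_VERSION_MAJOR=1 OCF_RA_VERSION_MINOR=0 \
  OCF_ROOT=/usr/lib/ocf \
  OCF_RESOURCE_INSTANCE=127.0.0.10 OCF_RESOURCE_TYPE=IPaddr \
  OCF_RESKEY_ip=127.0.0.10 \
  HA_LOGFILE=/dev/null HA_LOGFACILITY=local7 \
  /usr/lib/ocf/resource.d/heartbeat/IPaddr monitor; echo "rc=$?"
rc=7
----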
    skipthis = None
    args = sys.argv[1:]
    for i in range(0, len(args)) :
        if skipthis :
            skipthis = None
            continue
        elif args[i] == "-2" :
            ra = "IPaddr2"
        elif args[i] == "--ip" or args[i] == "-i" :
            skipthis = 1
            ipbase = args[i+1]
        elif args[i] == "--ipnum" or args[i] == "-n" :
            skipthis = 1
            ipnum = int(args[i+1])
        elif args[i] == "--perform" or args[i] == "-p" :
            skipthis = 1
            perform = args[i+1]
        elif args[i] == "--help" or args[i] == "-h" :
            usage()
        else:
            itnum = int(args[i])

    log("Begin OCF IPaddr/IPaddr2 Test")

    # Generate the test ips
    iplist = []
    ipstatus = {}
    fields = string.split(ipbase, '.')
    for i in range(0, ipnum) :
        ip = string.join(fields, '.')
        iplist.append(ip)
        ipstatus[ip] = perform_op(ra,ip,"monitor")
        fields[3] = str(int(fields[3])+1)
    log("Test ip:" + str(iplist))

    # If the user asked us to perform an operation, do just that and exit
    if perform != None:
        log("Performing operation %s" % perform)
        for ip in iplist:
            perform_op(ra, ip, perform)
        log("Done")
        sys.exit()

    log("RA Type:" + ra)
    log("Test Count:" + str(itnum))

    # Prepare Random (read in binary mode so struct.unpack() also works
    # under Python 3)
    f = open("/dev/urandom", "rb")
    seed = struct.unpack("BBB", f.read(3))
    f.close()
    #seed=(123,321,231)
    rand = random.Random()
    rand.seed(seed[0])
    log("Test Random Seed:" + str(seed))

    #
    # Begin Tests

    log(">>>>>>>>>>>>>>>>>>>>>>>>")
    for i in range(0, itnum):
        ip = rand.choice(iplist)
        if ipstatus[ip] == 0:
            op = "stop"
        elif ipstatus[ip] == 7:
            op = "start"
        else :
            op = rand.choice(["start","stop"])

        ret = perform_op(ra, ip, op)
        # update status
        if op == "start" and ret == 0:
            ipstatus[ip] = 0
        elif op == "stop" and ret == 0:
            ipstatus[ip] = 7
        else :
            ipstatus[ip] = 1

        result = ""
        if ret == 0:
            result = "success"
        else :
            result = "failure"
        summary[op]["called"] += 1
        summary[op][result] += 1
        log( "%d:%s %s \t[%s]"%(i, op, ip, result))
        audit(ra, iplist, ipstatus, summary)

    log("<<<<<<<<<<<<<<<<<<<<<<<<")
    log("start:\t" + str(summary["start"]))
    log("stop: \t" + str(summary["stop"]))
    log("audit:\t" + str(summary["audit"]))
diff --git a/fencing/fence_dummy b/cts/fence_dummy.in
similarity index 99%
rename from fencing/fence_dummy
rename to cts/fence_dummy.in
index e343a4e993..95dec43899 100755
--- a/fencing/fence_dummy
+++ b/cts/fence_dummy.in
@@ -1,463 +1,463 @@
-#!/usr/bin/python
+#!@PYTHON@
"""Dummy fence agent for testing """

# Pacemaker targets compatibility with Python 2.7 and 3.2+
from __future__ import print_function, unicode_literals, absolute_import, division

__copyright__ = "Copyright (C) 2012-2018 Andrew Beekhof <andrew@beekhof.net>"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"

import io
import os
import re
import sys
import time
import random
import atexit
import getopt

AGENT_VERSION = "4.0.0"
OCF_VERSION = "1.0"
SHORT_DESC = "Dummy fence agent"
LONG_DESC = """fence_dummy is a fake fencing agent which reports success based on its mode (pass|fail|random) without doing anything."""

# Short options used: ifhmnoqsvBDHMRUV
ALL_OPT = {
    "quiet" : {
        "getopt" : "q",
        "help" : "",
        "order" : 50 },
    "verbose" : {
        "getopt" : "v",
        "longopt" : "verbose",
        "help" : "-v, --verbose                  Verbose mode",
        "required" : "0",
        "shortdesc" : "Verbose mode",
        "order" : 51 },
    "debug" : {
        "getopt" : "D:",
        "longopt" : "debug-file",
        "help" : "-D, --debug-file=[debugfile]   Debugging to output file",
        "required" : "0",
        "shortdesc" : "Write debug information to given file",
        "order" : 52 },
    "version" : {
        "getopt" : "V",
        "longopt" : "version",
        "help" : "-V, --version                  Display version information and exit",
        "required" : "0",
        "shortdesc" : "Display version information and exit",
        "order" : 53 },
    "help" : {
        "getopt" : "h",
        "longopt" : "help",
        "help" :
"-h, --help Display this help and exit", "required" : "0", "shortdesc" : "Display help and exit", "order" : 54 }, "action" : { "getopt" : "o:", "longopt" : "action", "help" : "-o, --action=[action] Action: status, list, reboot (default), off or on", "required" : "1", "shortdesc" : "Fencing Action", "default" : "reboot", "order" : 1 }, "nodename" : { "getopt" : "N:", "longopt" : "nodename", "help" : "-N, --nodename Node name of fence victim (ignored)", "required" : "0", "shortdesc" : "The node name of fence victim (ignored)", "order" : 2 }, "mode": { "getopt" : "M:", "longopt" : "mode", "required" : "0", "help" : "-M, --mode=(pass|fail|random) Exit status to return for non-monitor operations", "shortdesc" : "Whether fence operations should always pass, always fail, or fail at random", "order" : 3 }, "monitor_mode" : { "getopt" : "m:", "longopt" : "monitor_mode", "help" : "-m, --monitor_mode=(pass|fail|random) Exit status to return for monitor operations", "required" : "0", "shortdesc" : "Whether monitor operations should always pass, always fail, or fail at random", "order" : 3 }, "random_sleep_range": { "getopt" : "R:", "required" : "0", "longopt" : "random_sleep_range", "help" : "-R, --random_sleep_range=[seconds] Sleep between 1 and [seconds] before returning", "shortdesc" : "Wait randomly between 1 and [seconds]", "order" : 3 }, "mock_dynamic_hosts" : { "getopt" : "H:", "longopt" : "mock_dynamic_hosts", "help" : "-H, --mock_dynamic_hosts=[list] What to return when dynamically queried for possible targets", "required" : "0", "shortdesc" : "A list of hosts we can fence", "order" : 3 }, "delay" : { "getopt" : "f:", "longopt" : "delay", "help" : "-f, --delay [seconds] Wait X seconds before fencing is started", "required" : "0", "shortdesc" : "Wait X seconds before fencing is started", "default" : "0", "order" : 3 }, "port" : { "getopt" : "n:", "longopt" : "plug", "help" : "-n, --plug=[id] Physical plug number on device (ignored)", "required" : "1", "shortdesc" : "Ignored", "order" : 4 }, "switch" : { "getopt" : "s:", "longopt" : "switch", "help" : "-s, --switch=[id] Physical switch number on device (ignored)", "required" : "0", "shortdesc" : "Ignored", "order" : 4 }, "nodeid" : { "getopt" : "i:", "longopt" : "nodeid", "help" : "-i, --nodeid Corosync id of fence victim (ignored)", "required" : "0", "shortdesc" : "Ignored", "order" : 4 }, "uuid" : { "getopt" : "U:", "longopt" : "uuid", "help" : "-U, --uuid UUID of the VM to fence (ignored)", "required" : "0", "shortdesc" : "Ignored", "order" : 4 } } def agent(): """ Return name this file was run as. """ return os.path.basename(sys.argv[0]) def fail_usage(message): """ Print a usage message and exit. """ sys.exit("%s\nPlease use '-h' for usage" % message) def show_docs(options): """ Handle informational options (display info and exit). """ device_opt = options["device_opt"] if "-h" in options: usage(device_opt) sys.exit(0) if "-o" in options and options["-o"].lower() == "metadata": metadata(device_opt, options) sys.exit(0) if "-V" in options: print(AGENT_VERSION) sys.exit(0) def sorted_options(avail_opt): """ Return a list of all options, in their internally specified order. """ sorted_list = [(key, ALL_OPT[key]) for key in avail_opt] sorted_list.sort(key=lambda x: x[1]["order"]) return sorted_list def usage(avail_opt): """ Print a usage message. 
""" print("Usage:") print("\t" + agent() + " [options]") print("Options:") for dummy, value in sorted_options(avail_opt): if len(value["help"]) != 0: print(" " + value["help"]) def metadata(avail_opt, options): """ Print agent metadata. """ # This log is just for testing handling of stderr output print("asked for fence_dummy metadata", file=sys.stderr) print(""" %s %s """ % (agent(), SHORT_DESC, AGENT_VERSION, OCF_VERSION, LONG_DESC)) for option, dummy in sorted_options(avail_opt): if "shortdesc" in ALL_OPT[option]: print("\t") default = "" default_name_arg = "-" + ALL_OPT[option]["getopt"][:-1] default_name_no_arg = "-" + ALL_OPT[option]["getopt"] if "default" in ALL_OPT[option]: default = 'default="%s"' % str(ALL_OPT[option]["default"]) elif default_name_arg in options: if options[default_name_arg]: try: default = 'default="%s"' % options[default_name_arg] except TypeError: ## @todo/@note: Currently there is no clean way how to handle lists ## we can create a string from it but we can't set it on command line default = 'default="%s"' % str(options[default_name_arg]) elif default_name_no_arg in options: default = 'default="true"' mixed = ALL_OPT[option]["help"] ## split it between option and help text res = re.compile(r"^(.*--\S+)\s+", re.IGNORECASE | re.S).search(mixed) if None != res: mixed = res.group(1) mixed = mixed.replace("<", "<").replace(">", ">") print("\t\t") if ALL_OPT[option]["getopt"].count(":") > 0: print("\t\t") else: print("\t\t") print("\t\t" + ALL_OPT[option]["shortdesc"] + "") print("\t") print(""" \t \t \t \t \t \t \t """) def option_longopt(option): """ Return the getopt-compatible long-option name of the given option. """ if ALL_OPT[option]["getopt"].endswith(":"): return ALL_OPT[option]["longopt"] + "=" else: return ALL_OPT[option]["longopt"] def opts_from_command_line(argv, avail_opt): """ Read options from command-line arguments. """ # Prepare list of options for getopt getopt_string = "" longopt_list = [] for k in avail_opt: if k in ALL_OPT: getopt_string += ALL_OPT[k]["getopt"] else: fail_usage("Parse error: unknown option '"+k+"'") if k in ALL_OPT and "longopt" in ALL_OPT[k]: longopt_list.append(option_longopt(k)) try: opt, dummy = getopt.gnu_getopt(argv, getopt_string, longopt_list) except getopt.GetoptError as error: fail_usage("Parse error: " + error.msg) # Transform longopt to short one which are used in fencing agents old_opt = opt opt = {} for old_option in dict(old_opt).keys(): if old_option.startswith("--"): for option in ALL_OPT.keys(): if "longopt" in ALL_OPT[option] and "--" + ALL_OPT[option]["longopt"] == old_option: opt["-" + ALL_OPT[option]["getopt"].rstrip(":")] = dict(old_opt)[old_option] else: opt[old_option] = dict(old_opt)[old_option] # Compatibility Layer (with what? probably not needed for fence_dummy) new_opt = dict(opt) if "-T" in new_opt: new_opt["-o"] = "status" if "-n" in new_opt: new_opt["-m"] = new_opt["-n"] opt = new_opt return opt def opts_from_stdin(avail_opt): """ Read options from standard input. """ opt = {} name = "" for line in sys.stdin.readlines(): line = line.strip() if line.startswith("#") or (len(line) == 0): continue (name, value) = (line + "=").split("=", 1) value = value[:-1] # Compatibility Layer (with what? 
probably not needed for fence_dummy) if name == "option": name = "action" if name not in avail_opt: print("Parse error: Ignoring unknown option '%s'" % line, file=sys.stderr) continue if ALL_OPT[name]["getopt"].endswith(":"): opt["-"+ALL_OPT[name]["getopt"].rstrip(":")] = value elif value.lower() in ["1", "yes", "on", "true"]: opt["-"+ALL_OPT[name]["getopt"]] = "1" return opt def process_input(avail_opt): """ Set standard environment variables, and parse all options. """ # Set standard environment os.putenv("LANG", "C") os.putenv("LC_ALL", "C") # Read options from command line or standard input if len(sys.argv) > 1: return opts_from_command_line(sys.argv[1:], avail_opt) else: return opts_from_stdin(avail_opt) def atexit_handler(): """ Close stdout on exit. """ try: sys.stdout.close() os.close(1) except IOError: sys.exit("%s failed to close standard output" % agent()) def success_mode(options, option, default_value): """ Return exit code specified by option. """ if option in options: test_value = options[option] else: test_value = default_value if test_value == "pass": exitcode = 0 elif test_value == "fail": exitcode = 1 else: exitcode = random.randint(0, 1) return exitcode def write_options(options): """ Write out all options to debug file. """ try: debugfile = io.open(options["-D"], 'at') debugfile.write("### %s ###\n" % (time.strftime("%Y-%m-%d %H:%M:%S"))) for option in sorted(options): debugfile.write("%s=%s\n" % (option, options[option])) debugfile.write("###\n") debugfile.close() except IOError: pass def main(): """ Make it so! """ device_opt = ALL_OPT.keys() ## Defaults for fence agent atexit.register(atexit_handler) options = process_input(device_opt) options["device_opt"] = device_opt show_docs(options) # dump input to file if "-D" in options: write_options(options) if "-f" in options: val = int(options["-f"]) print("delay sleep for %d seconds" % val, file=sys.stderr) time.sleep(val) # random sleep for testing if "-R" in options: val = int(options["-R"]) ran = random.randint(1, val) print("random sleep for %d seconds" % ran, file=sys.stderr) time.sleep(ran) if "-o" in options: action = options["-o"] else: action = "action" if action == "monitor": exitcode = success_mode(options, "-m", "pass") elif action == "list": print("fence_dummy action (list) called", file=sys.stderr) if "-H" in options: print(options["-H"]) exitcode = 0 else: print("dynamic hostlist requires mock_dynamic_hosts to be set", file=sys.stderr) exitcode = 1 else: exitcode = success_mode(options, "-M", "random") # Ensure we generate some error output on failure exit. 
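stonithd normally feeds fence agents key=value options on standard input, which is the path opts_from_stdin() above handles. A quick manual test of that path might look like this (the agent location is an example; after configure, the file is generated as cts/fence_dummy):

----
# Monitor in always-pass mode, options supplied the way stonithd does
$ printf 'action=monitor\nmonitor_mode=pass\n' | ./fence_dummy
$ echo "rc=$?"
rc=0
----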
    if exitcode == 1:
        print("simulated %s failure" % action, file=sys.stderr)
    sys.exit(exitcode)

if __name__ == "__main__":
    main()
diff --git a/doc/Clusters_from_Scratch/en-US/images b/doc/Clusters_from_Scratch/en-US/images
new file mode 120000
index 0000000000..963300dd95
--- /dev/null
+++ b/doc/Clusters_from_Scratch/en-US/images
@@ -0,0 +1 @@
+../../shared/en-US/images
\ No newline at end of file
diff --git a/doc/Clusters_from_Scratch/en-US/images/pcmk-active-active.svg b/doc/Clusters_from_Scratch/en-US/images/pcmk-active-active.svg
deleted file mode 100644
index f07a20b451..0000000000
--- a/doc/Clusters_from_Scratch/en-US/images/pcmk-active-active.svg
+++ /dev/null
@@ -1,1419 +0,0 @@
[deleted SVG markup omitted: "Active / Active" architecture diagram showing four hosts running GFS2 over shared storage, with Corosync and Pacemaker cluster layers and URL/Mail/D'base web site services]
diff --git a/doc/Clusters_from_Scratch/en-US/images/pcmk-active-passive.svg b/doc/Clusters_from_Scratch/en-US/images/pcmk-active-passive.svg
deleted file mode 100644
index 2e0b0b01e9..0000000000
--- a/doc/Clusters_from_Scratch/en-US/images/pcmk-active-passive.svg
+++ /dev/null
@@ -1,1079 +0,0 @@
[deleted SVG markup omitted: "Active / Passive" architecture diagram with two hosts, synchronized storage, and Corosync and Pacemaker cluster layers]
diff --git a/doc/Clusters_from_Scratch/en-US/images/pcmk-shared-failover.svg b/doc/Clusters_from_Scratch/en-US/images/pcmk-shared-failover.svg
deleted file mode 100644
index b28d09cea3..0000000000
--- a/doc/Clusters_from_Scratch/en-US/images/pcmk-shared-failover.svg
+++ /dev/null
@@ -1,1306 +0,0 @@
[deleted SVG markup omitted: "Shared Failover" architecture diagram with four hosts and synchronized shared storage]
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 781c4c7b97..76fc9e014e 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,286 +1,310 @@
#
# doc: Pacemaker code
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # include $(top_srcdir)/Makefile.common helpdir = $(datadir)/$(PACKAGE) ascii = crm_fencing.txt acls.txt docbook = Clusters_from_Scratch \ + Pacemaker_Administration \ Pacemaker_Development \ Pacemaker_Explained \ Pacemaker_Remote doc_DATA = $(ascii) $(generated_docs) # toplevel rsync destination for www targets (without trailing slash) RSYNC_DEST ?= root@www.clusterlabs.org:/var/www/html # recursive, preserve symlinks/permissions/times, verbose, compress, # don't cross filesystems, sparse, show progress RSYNC_OPTS = -rlptvzxS --progress publican_docs = generated_docs = generated_mans = # What formats to build: pdf,html,html-single,html-desktop,epub DOCBOOK_FORMATS := html-desktop # What languages to build DOCBOOK_LANGS := en-US # What languages to build for uploading to website # (currently only en-US because translations aren't up-to-date) UPLOAD_LANGS = en-US DOTs = $(wildcard */en-US/images/*.dot) SVG = $(wildcard */en-US/images/pcmk-*.svg) $(DOTs:%.dot=%.svg) generated_PNGS = $(SVG:%.svg=%-small.png) $(SVG:%.svg=%.png) $(SVG:%.svg=%-large.png) \ Pacemaker_Explained/en-US/images/Policy-Engine-big.png \ Pacemaker_Explained/en-US/images/Policy-Engine-small.png PNGS = $(generated_PNGS) \ Pacemaker_Remote/en-US/images/pcmk-ha-cluster-stack.png \ Pacemaker_Remote/en-US/images/pcmk-ha-remote-stack.png BRAND_PNGS = publican-clusterlabs/en-US/images/title_logo.png \ publican-clusterlabs/en-US/images/image_left.png \ publican-clusterlabs/en-US/images/image_right.png \ publican-clusterlabs/en-US/images/h1-bg.png graphics: $(PNGS) %.png: %.svg $(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=90 -C --export-png=$@ %-small.png: %.svg $(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=45 -C --export-png=$@ %-large.png: %.svg $(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=180 -C --export-png=$@ if BUILD_ASCIIDOC generated_docs += $(ascii:%.txt=%.html) if BUILD_DOCBOOK publican_docs += $(docbook) endif endif EXTRA_DIST = $(docbook:%=%.xml) %.html: %.txt $(AM_V_ASCII)$(ASCIIDOC) --unsafe --backend=xhtml11 $< # publican-clusterlabs/xsl/{html,html-single,pdf}.xsl refer to URIs # requiring Internet access, hence we shadow that with a XML catalog-based # redirect to local files brought with Publican installation; # this is what newer Publican normally does with the system-wide catalog # upon its installation, but let's provide a compatibility for older # or badly installed instances (via adding the created file into # XML_CATALOG_FILES for libxml2 backing Publican as a fallback); # note that nextCatalog arrangement needed so as to overcome # https://rt.cpan.org/Public/Bug/Display.html?id=113781 publican-catalog-fallback: @exec >$@-t \ && echo '' \ && echo '' \ && echo '' $(AM_V_GEN)mv $@-t $@ publican-catalog: publican-catalog-fallback @exec >$@-t \ && echo '' \ && echo '' \ && echo '' \ && echo '' $(AM_V_GEN)mv $@-t $@ SHARED_TXT=$(wildcard shared/en-US/*.txt) SHARED_XML=$(SHARED_TXT:%.txt=%.xml) CFS_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt) CFS_SHARED_XML=$(CFS_SHARED_TXT:%.txt=%.xml) CFS_TXT=$(wildcard Clusters_from_Scratch/en-US/*.txt) 
CFS_XML=$(CFS_TXT:%.txt=%.xml) $(CFS_XML): $(CFS_SHARED_XML) PUBLICAN_INTREE_DEPS = if PUBLICAN_INTREE_BRAND PUBLICAN_INTREE_DEPS += publican-catalog endif # We have to hardcode the book name # With '%' the test for 'newness' fails Clusters_from_Scratch.build: $(PNGS) $(wildcard Clusters_from_Scratch/en-US/*.xml) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS) + $(PCMK_V) @echo Building $(@:%.build=%) because of $? + rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp +if PUBLICAN_INTREE_BRAND + $(AM_V_PUB)cd $(@:%.build=%) \ + && RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \ + $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \ + $(PCMK_quiet) +else + $(AM_V_PUB)cd $(@:%.build=%) \ + && RPM_BUILD_DIR="" \ + $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \ + $(PCMK_quiet) +endif + rm -rf $(@:%.build=%)/tmp + touch $@ + + +PA_TXT=$(wildcard Pacemaker_Administration/en-US/*.txt) +PA_XML=$(PA_TXT:%.txt=%.xml) + +# We have to hardcode the book name +# With '%' the test for 'newness' fails +Pacemaker_Administration.build: $(wildcard Pacemaker_Administration/en-US/*.xml) $(PA_XML) $(PUBLICAN_INTREE_DEPS) $(PCMK_V) @echo Building $(@:%.build=%) because of $? rm -rf $(@:%.build=%)/publish/* if PUBLICAN_INTREE_BRAND $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \ $(PCMK_quiet) else $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \ $(PCMK_quiet) endif rm -rf $(@:%.build=%)/tmp touch $@ PD_TXT=$(wildcard Pacemaker_Development/en-US/*.txt) PD_XML=$(PD_TXT:%.txt=%.xml) # We have to hardcode the book name # With '%' the test for 'newness' fails Pacemaker_Development.build: $(wildcard Pacemaker_Development/en-US/*.xml) $(PD_XML) $(PUBLICAN_INTREE_DEPS) $(PCMK_V) @echo Building $(@:%.build=%) because of $? - rm -rf $(@:%.build=%)/publish/* + rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp if PUBLICAN_INTREE_BRAND $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \ $(PCMK_quiet) else $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \ $(PCMK_quiet) endif rm -rf $(@:%.build=%)/tmp touch $@ PE_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt) PE_SHARED_XML=$(PE_SHARED_TXT:%.txt=%.xml) PE_TXT=$(wildcard Pacemaker_Explained/en-US/*.txt) PE_XML=$(PE_TXT:%.txt=%.xml) $(PE_XML): $(PE_SHARED_XML) # We have to hardcode the book name # With '%' the test for 'newness' fails Pacemaker_Explained.build: $(PNGS) $(wildcard Pacemaker_Explained/en-US/*.xml) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS) $(PCMK_V) @echo Building $(@:%.build=%) because of $? 
- rm -rf $(@:%.build=%)/publish/* + rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp if PUBLICAN_INTREE_BRAND $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \ $(PCMK_quiet) else $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \ $(PCMK_quiet) endif rm -rf $(@:%.build=%)/tmp touch $@ PR_TXT=$(wildcard Pacemaker_Remote/en-US/*.txt) PR_XML=$(PR_TXT:%.txt=%.xml) # We have to hardcode the book name # With '%' the test for 'newness' fails Pacemaker_Remote.build: $(PNGS) $(wildcard Pacemaker_Remote/en-US/*.xml) $(PR_XML) $(PUBLICAN_INTREE_DEPS) $(PCMK_V) @echo Building $(@:%.build=%) because of $? - rm -rf $(@:%.build=%)/publish/* + rm -rf $(@:%.build=%)/publish/* $(@:%.build=%)/tmp if PUBLICAN_INTREE_BRAND $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \ $(PCMK_quiet) else $(AM_V_PUB)cd $(@:%.build=%) \ && RPM_BUILD_DIR="" \ $(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \ $(PCMK_quiet) endif rm -rf $(@:%.build=%)/tmp touch $@ # Update the translation template pot: for book in $(docbook); do \ echo "Updating translation templates in: $$book"; \ ( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_pot ); \ done # Update the actual translations po: pot for book in $(docbook); do \ echo "Updating translations in: $$book"; \ ( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_po --langs=all );\ done if BUILD_DOCBOOK docbook_build = $(docbook:%=%.build) all-local: $(docbook_build) */publican.cfg install-data-local: all-local for book in $(docbook); do \ filelist=`find $$book/publish/* -print`; \ for f in $$filelist; do \ p=`echo $$f | sed s:publish/:: | sed s:Pacemaker/::`; \ if [ -d $$f ]; then \ $(INSTALL) -d -m 775 $(DESTDIR)$(docdir)/$$p; \ else \ $(INSTALL) -m 644 $$f $(DESTDIR)$(docdir)/$$p; \ fi \ done; \ done endif brand: $(BRAND_PNGS) $(wildcard publican-clusterlabs/en-US/*.xml) cd publican-clusterlabs && publican build --formats=xml --langs=all --publish echo "Installing..." 
cd publican-clusterlabs && sudo publican install_brand --path=$(datadir)/publican/Common_Content # find publican-clusterlabs -name "*.noarch.rpm" -exec rm -f \{\} \; # cd publican-clusterlabs && $(PUBLICAN) package --binary # find publican-clusterlabs -name "*.noarch.rpm" -exec sudo rpm -Uvh --force \{\} \; pdf: make DOCBOOK_FORMATS="pdf" all-local www: clean-local $(generated_docs) $(ascii) for book in $(docbook); do \ sed -i.sed 's@^brand:.*@brand: clusterlabs@' $$book/publican.cfg; \ done make DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" all-local echo Uploading current $(PACKAGE_SERIES) documentation set to clusterlabs.org if BUILD_DOCBOOK for book in $(docbook); do \ echo Uploading $$book...; \ echo "Generated on `date` from version: $(shell git log --pretty="format:%h %d" -n 1)" >> $$book/publish/build-$(PACKAGE_SERIES).txt; \ rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/"; \ done endif rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) "$(RSYNC_DEST)/$(PACKAGE)/doc/" clean-local: -rm -rf $(generated_docs) $(generated_mans) $(docbook_build) $(generated_PNGS) -rm -rf $(SHARED_XML) $(CFS_XML) $(PE_XML) $(PR_XML) -rm -rf publican-catalog-fallback publican-catalog for book in $(docbook); do rm -rf $$book/tmp $$book/publish; done diff --git a/doc/Pacemaker_Administration/en-US/Author_Group.xml b/doc/Pacemaker_Administration/en-US/Author_Group.xml new file mode 100644 index 0000000000..a40b3adcc6 --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Author_Group.xml @@ -0,0 +1,17 @@ + + + + + AndrewBeekhof + Red Hat + Co-author + andrew@beekhof.net + + + KenGaillot + Red Hat + Co-author + kgaillot@redhat.com + + diff --git a/doc/Pacemaker_Administration/en-US/Book_Info.xml b/doc/Pacemaker_Administration/en-US/Book_Info.xml new file mode 100644 index 0000000000..8622da75c6 --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Book_Info.xml @@ -0,0 +1,36 @@ + + +%BOOK_ENTITIES; +]> + + Pacemaker Administration + Managing Pacemaker Clusters + + 1 + 0 + + + This document has instructions and tips for system + administrators who need to manage high-availability + clusters using Pacemaker. + + + + + + + + + + + + + diff --git a/doc/Pacemaker_Explained/en-US/Ap-OCF.txt b/doc/Pacemaker_Administration/en-US/Ch-Agents.txt similarity index 76% rename from doc/Pacemaker_Explained/en-US/Ap-OCF.txt rename to doc/Pacemaker_Administration/en-US/Ch-Agents.txt index 25a9b721f1..8f2b6a3181 100644 --- a/doc/Pacemaker_Explained/en-US/Ap-OCF.txt +++ b/doc/Pacemaker_Administration/en-US/Ch-Agents.txt @@ -1,261 +1,338 @@ -[appendix] += Resource Agents = -[[ap-ocf]] -== More About OCF Resource Agents == +== OCF Resource Agents == === Location of Custom Scripts === indexterm:[OCF Resource Agents] OCF Resource Agents are found in +/usr/lib/ocf/resource.d/pass:[provider]+ When creating your own agents, you are encouraged to create a new directory under +/usr/lib/ocf/resource.d/+ so that they are not confused with (or overwritten by) the agents shipped by existing providers. So, for example, if you choose the provider name of bigCorp and want a new resource named bigApp, you would create a resource agent called +/usr/lib/ocf/resource.d/bigCorp/bigApp+ and define a resource: [source,XML] ---- ---- === Actions === All OCF resource agents are required to implement the following actions. 
.Required Actions for OCF Agents [width="95%",cols="3m,3,7",options="header",align="center"] |========================================================= |Action |Description |Instructions |start |Start the resource |Return 0 on success and an appropriate error code otherwise. Must not report success until the resource is fully active. indexterm:[start,OCF Action] indexterm:[OCF,Action,start] |stop |Stop the resource |Return 0 on success and an appropriate error code otherwise. Must not report success until the resource is fully stopped. indexterm:[stop,OCF Action] indexterm:[OCF,Action,stop] |monitor |Check the resource's state |Exit 0 if the resource is running, 7 if it is stopped, and anything else if it is failed. indexterm:[monitor,OCF Action] indexterm:[OCF,Action,monitor] NOTE: The monitor script should test the state of the resource on the local machine only. |meta-data |Describe the resource |Provide information about this resource as an XML snippet. Exit with 0. indexterm:[meta-data,OCF Action] indexterm:[OCF,Action,meta-data] NOTE: This is _not_ performed as root. |validate-all |Verify the supplied parameters |Return 0 if parameters are valid, 2 if not valid, and 6 if resource is not configured. indexterm:[validate-all,OCF Action] indexterm:[OCF,Action,validate-all] |========================================================= Additional requirements (not part of the OCF specification) are placed on -agents that will be used for advanced concepts such as -<> and <> resources. +agents that will be used for advanced concepts such as clones +and multi-state resources. .Optional Actions for OCF Resource Agents [width="95%",cols="2m,6,3",options="header",align="center"] |========================================================= |Action |Description |Instructions |promote |Promote the local instance of a multi-state resource to the master (primary) state. |Return 0 on success indexterm:[promote,OCF Action] indexterm:[OCF,Action,promote] |demote |Demote the local instance of a multi-state resource to the slave (secondary) state. |Return 0 on success indexterm:[demote,OCF Action] indexterm:[OCF,Action,demote] |notify |Used by the cluster to send the agent pre- and post-notification events telling the resource what has happened and will happen. |Must not fail. Must exit with 0 indexterm:[notify,OCF Action] indexterm:[OCF,Action,notify] |========================================================= One action specified in the OCF specs, +recover+, is not currently used by the cluster. It is intended to be a variant of the +start+ action that tries to recover a resource locally. [IMPORTANT] ==== If you create a new OCF resource agent, use indexterm:[ocf-tester]`ocf-tester` to verify that the agent complies with the OCF standard properly. ==== === How are OCF Return Codes Interpreted? === The first thing the cluster does is to check the return code against the expected result. If the result does not match the expected value, then the operation is considered to have failed, and recovery action is initiated. 
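As a concrete illustration of the required actions above, a minimal (hypothetical) skeleton for the bigCorp/bigApp example mentioned earlier might look like the following; a real agent would add parameter validation and more careful state checks. Running `ocf-tester -n bigApp /usr/lib/ocf/resource.d/bigCorp/bigApp` against it exercises the same contract:

----
#!/bin/sh
# /usr/lib/ocf/resource.d/bigCorp/bigApp -- illustrative skeleton only;
# HA_RSCTMP and the state-file location are assumptions
STATE="${HA_RSCTMP:-/var/run/resource-agents}/bigApp.state"

case "$1" in
    start)      touch "$STATE"; exit 0 ;;   # OCF_SUCCESS once fully active
    stop)       rm -f "$STATE"; exit 0 ;;   # OCF_SUCCESS once fully stopped
    monitor)    # 0 = running, 7 = cleanly stopped
                if [ -e "$STATE" ]; then exit 0; else exit 7; fi ;;
    validate-all) exit 0 ;;                 # no parameters to validate here
    meta-data)
        cat <<'EOF'
<?xml version="1.0"?>
<resource-agent name="bigApp" version="0.1">
  <version>1.0</version>
  <shortdesc lang="en">Skeleton agent</shortdesc>
  <longdesc lang="en">Does nothing but track a state file.</longdesc>
  <parameters/>
  <actions>
    <action name="start" timeout="20s"/>
    <action name="stop" timeout="20s"/>
    <action name="monitor" timeout="20s" interval="10s"/>
    <action name="meta-data" timeout="5s"/>
  </actions>
</resource-agent>
EOF
        exit 0 ;;
    *)          exit 3 ;;                   # OCF_ERR_UNIMPLEMENTED
esac
----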
There are three types of failure recovery:

.Types of recovery performed by the cluster
[width="95%",cols="1m,4,4",options="header",align="center"]
|=========================================================

|Type
|Description
|Action Taken by the Cluster

|soft
|A transient error occurred
|Restart the resource or move it to a new location
 indexterm:[soft,OCF error]
 indexterm:[OCF,error,soft]

|hard
|A non-transient error that may be specific to the current node occurred
|Move the resource elsewhere and prevent it from being retried on the current node
 indexterm:[hard,OCF error]
 indexterm:[OCF,error,hard]

|fatal
|A non-transient error that will be common to all cluster nodes (e.g. a bad configuration was specified)
|Stop the resource and prevent it from being started on any cluster node
 indexterm:[fatal,OCF error]
 indexterm:[OCF,error,fatal]

|=========================================================

[[s-ocf-return-codes]]
=== OCF Return Codes ===

The following table outlines the different OCF return codes and the type of
recovery the cluster will initiate when a failure code is received.
Although counterintuitive, even actions that return 0
(aka. +OCF_SUCCESS+) can be considered to have failed, if 0 was not
the expected return value.

.OCF Return Codes and their Recovery Types
[width="95%",cols="1m,4m,6,1m",options="header",align="center"]
|=========================================================

|RC
|OCF Alias
|Description
|RT

|0
|OCF_SUCCESS
|Success. The command completed successfully. This is the expected result for all start, stop, promote and demote commands.
|soft

|1
|OCF_ERR_GENERIC
|Generic "there was a problem" error code.
|soft

|2
|OCF_ERR_ARGS
|The resource's configuration is not valid on this machine (e.g. it refers to a location not found on the node).
|hard

|3
|OCF_ERR_UNIMPLEMENTED
|The requested action is not implemented.
|hard

|4
|OCF_ERR_PERM
|The resource agent does not have sufficient privileges to complete the task.
|hard

|5
|OCF_ERR_INSTALLED
|The tools required by the resource are not installed on this machine.
|hard

|6
|OCF_ERR_CONFIGURED
|The resource's configuration is invalid (e.g. required parameters are missing).
|fatal

|7
|OCF_NOT_RUNNING
|The resource is safely stopped. The cluster will not attempt to stop a resource that returns this for any action.
|N/A

|8
|OCF_RUNNING_MASTER
|The resource is running in master mode.
|soft

|9
|OCF_FAILED_MASTER
|The resource is in master mode but has failed. The resource will be demoted, stopped and then started (and possibly promoted) again.
|soft

|other
|N/A
|Custom error code.
|soft

|=========================================================

Exceptions to the recovery handling described above:

* Probes (non-recurring monitor actions) that find a resource active
  (or in master mode) will not result in recovery action unless it is
  also found active elsewhere.
* The recovery action taken when a resource is found active more than
  once is determined by the resource's +multiple-active+ property.
* Recurring actions that return +OCF_ERR_UNIMPLEMENTED+
  do not cause any type of recovery.
+
+== Init Script LSB Compliance ==
+
+The relevant part of the
+http://refspecs.linuxfoundation.org/lsb.shtml[LSB specifications]
+includes a description of all the return codes listed here.
+
+Assuming `some_service` is configured correctly and currently
+inactive, the following sequence will help you determine if it is
+LSB-compatible:
+
+. Start (stopped):
++
+----
+# /etc/init.d/some_service start ; echo "result: $?"
+----
++
+ .. Did the service start?
+ .. Did the command print *result: 0* (in addition to its usual output)?
++
+. Status (running):
++
+----
+# /etc/init.d/some_service status ; echo "result: $?"
+----
++
+ .. Did the script accept the command?
+ .. Did the script indicate the service was running?
+ .. Did the command print *result: 0* (in addition to its usual output)?
++
+. Start (running):
++
+----
+# /etc/init.d/some_service start ; echo "result: $?"
+----
++
+ .. Is the service still running?
+ .. Did the command print *result: 0* (in addition to its usual output)?
++
+. Stop (running):
++
+----
+# /etc/init.d/some_service stop ; echo "result: $?"
+----
++
+ .. Was the service stopped?
+ .. Did the command print *result: 0* (in addition to its usual output)?
++
+. Status (stopped):
++
+----
+# /etc/init.d/some_service status ; echo "result: $?"
+----
++
+ .. Did the script accept the command?
+ .. Did the script indicate the service was not running?
+ .. Did the command print *result: 3* (in addition to its usual output)?
++
+. Stop (stopped):
++
+----
+# /etc/init.d/some_service stop ; echo "result: $?"
+----
++
+ .. Is the service still stopped?
+ .. Did the command print *result: 0* (in addition to its usual output)?
++
+. Status (failed):
++
+.. This step is not readily testable and relies on manual inspection of the script.
++
+The script can use one of the error codes (other than 3) listed in the
+LSB spec to indicate that it is active but failed. This tells the
+cluster that before moving the resource to another node, it needs to
+stop it on the existing one first.
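A small script can automate steps 1 through 6 of the sequence above (step 7 still requires reading the init script); a sketch, assuming the service starts out stopped:

----
#!/bin/sh
# Usage: ./lsb-check.sh /etc/init.d/some_service
svc="$1"; fails=0

check() {   # check <op> <expected-rc> <label>
    "$svc" "$1" >/dev/null 2>&1; rc=$?
    if [ "$rc" -ne "$2" ]; then
        echo "FAIL: $3 returned $rc (expected $2)"; fails=$((fails+1))
    fi
}

check start  0 "start (stopped)"
check status 0 "status (running)"
check start  0 "start (running)"
check stop   0 "stop (running)"
check status 3 "status (stopped)"
check stop   0 "stop (stopped)"

[ "$fails" -eq 0 ] && echo "script passes LSB steps 1-6"
----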
+
+If the answer to any of the above questions is no, then the script is
+not LSB-compliant. Your options are then to either fix the script or
+write an OCF agent based on the existing script.
diff --git a/doc/Pacemaker_Administration/en-US/Ch-Cluster.txt b/doc/Pacemaker_Administration/en-US/Ch-Cluster.txt
new file mode 100644
index 0000000000..3a14d7cdf3
--- /dev/null
+++ b/doc/Pacemaker_Administration/en-US/Ch-Cluster.txt
@@ -0,0 +1,58 @@
+= The Cluster Layer =
+
+== Pacemaker and the Cluster Layer ==
+
+Pacemaker utilizes an underlying cluster layer for two purposes:
+
+* obtaining quorum
+* messaging between nodes
+
+Currently, only Corosync 2 and later is supported for this layer.
+
+== Managing Nodes in a Corosync-Based Cluster ==
+
+=== Adding a New Corosync Node ===
+
+indexterm:[Corosync,Add Cluster Node]
+indexterm:[Add Cluster Node,Corosync]
+
+To add a new node:
+
+. Install Corosync and Pacemaker on the new host.
+. Copy +/etc/corosync/corosync.conf+ and +/etc/corosync/authkey+ (if it exists)
+  from an existing node. You may need to modify the *mcastaddr* option to match
+  the new node's IP address.
+. Start the cluster software on the new host. If a log message containing
+  "Invalid digest" appears from Corosync, the keys are not consistent between
+  the machines.
+
+=== Removing a Corosync Node ===
+
+indexterm:[Corosync,Remove Cluster Node]
+indexterm:[Remove Cluster Node,Corosync]
+
+Because the messaging and membership layers are the authoritative
+source for cluster nodes, deleting them from the CIB is not a complete
+solution. First, one must arrange for corosync to forget about the
+node (*pcmk-1* in the example below).
+
+. Stop the cluster on the host to be removed. How to do this will vary with
+  your operating system and installed versions of cluster software, for example,
+  `pcs cluster stop` if you are using pcs for cluster management.
+. From one of the remaining active cluster nodes, tell Pacemaker to forget
+  about the removed host, which will also delete the node from the CIB:
++
+----
+# crm_node -R pcmk-1
+----
+
+=== Replacing a Corosync Node ===
+
+indexterm:[Corosync,Replace Cluster Node]
+indexterm:[Replace Cluster Node,Corosync]
+
+To replace an existing cluster node:
+
+. Make sure the old node is completely stopped.
+. Give the new machine the same hostname and IP address as the old one.
+. Follow the procedure above for adding a node.
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Basics.txt b/doc/Pacemaker_Administration/en-US/Ch-Configuring.txt
similarity index 67%
rename from doc/Pacemaker_Explained/en-US/Ch-Basics.txt
rename to doc/Pacemaker_Administration/en-US/Ch-Configuring.txt
index 6dbca50346..cffe780bbf 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Basics.txt
+++ b/doc/Pacemaker_Administration/en-US/Ch-Configuring.txt
@@ -1,385 +1,435 @@
-= Configuration Basics =
-
-== Configuration Layout ==
-
-The cluster is defined by the Cluster Information Base (CIB),
-which uses XML notation. The simplest CIB, an empty one, looks like this:
-
-.An empty configuration
-======
-[source,XML]
--------
-<cib crm_feature_set="3.0.7" validate-with="pacemaker-1.2" epoch="1" num_updates="1">
-  <configuration>
-    <crm_config/>
-    <nodes/>
-    <resources/>
-    <constraints/>
-  </configuration>
-  <status/>
-</cib>
--------
-======
-
-The empty configuration above contains the major sections that make up a CIB:
-
-* +cib+: The entire CIB is enclosed with a +cib+ tag. Certain fundamental settings
-  are defined as attributes of this tag.
-
-  ** +configuration+: This section -- the primary focus of this document --
-     contains traditional configuration information such as what resources the
-     cluster serves and the relationships among them.
- - *** +crm_config+: cluster-wide configuration options - *** +nodes+: the machines that host the cluster - *** +resources+: the services run by the cluster - *** +constraints+: indications of how resources should be placed - - ** +status+: This section contains the history of each resource on each node. - Based on this data, the cluster can construct the complete current - state of the cluster. The authoritative source for this section - is the local resource manager (lrmd process) on each cluster node, and - the cluster will occasionally repopulate the entire section. For this - reason, it is never written to disk, and administrators are advised - against modifying it in any way. - -In this document, configuration settings will be described as 'properties' or 'options' -based on how they are defined in the CIB: - -* Properties are XML attributes of an XML element. -* Options are name-value pairs expressed as +nvpair+ child elements of an XML element. - -Normally you will use command-line tools that abstract the XML, so the -distinction will be unimportant; both properties and options are -cluster settings you can tweak. - -== The Current State of the Cluster == - -Before one starts to configure a cluster, it is worth explaining how -to view the finished product. For this purpose we have created the -`crm_mon` utility, which will display the -current state of an active cluster. It can show the cluster status by -node or by resource and can be used in either single-shot or -dynamically-updating mode. There are also modes for displaying a list -of the operations performed (grouped by node and resource) as well as -information about failures. - -Using this tool, you can examine the state of the cluster for -irregularities and see how it responds when you cause or simulate -failures. - -Details on all the available options can be obtained using the -`crm_mon --help` command. - -.Sample output from crm_mon -====== -------- - ============ - Last updated: Fri Nov 23 15:26:13 2007 - Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec) - 3 Nodes configured. - 5 Resources configured. - ============ - - Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online - 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1 - 192.168.100.182 (heartbeat:IPaddr): Started sles-1 - 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1 - rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1 - child_DoFencing:2 (stonith:external/vmware): Started sles-1 - Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby - Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online - rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3 - rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3 - child_DoFencing:0 (stonith:external/vmware): Started sles-3 -------- -====== - -.Sample output from crm_mon -n -====== -------- - ============ - Last updated: Fri Nov 23 15:26:13 2007 - Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec) - 3 Nodes configured. - 5 Resources configured. 
- ============ - - Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online - Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby - Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online - - Resource Group: group-1 - 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1 - 192.168.100.182 (heartbeat:IPaddr): Started sles-1 - 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1 - rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1 - rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3 - rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3 - Clone Set: DoFencing - child_DoFencing:0 (stonith:external/vmware): Started sles-3 - child_DoFencing:1 (stonith:external/vmware): Stopped - child_DoFencing:2 (stonith:external/vmware): Started sles-1 -------- -====== - -The DC (Designated Controller) node is where all the decisions are -made, and if the current DC fails a new one is elected from the -remaining cluster nodes. The choice of DC is of no significance to an -administrator beyond the fact that its logs will generally be more -interesting. += Configuring Pacemaker = == How Should the Configuration be Updated? == There are three basic rules for updating the cluster configuration: * Rule 1 - Never edit the +cib.xml+ file manually. Ever. I'm not making this up. * Rule 2 - Read Rule 1 again. * Rule 3 - The cluster will notice if you ignored rules 1 & 2 and refuse to use the configuration. Now that it is clear how 'not' to update the configuration, we can begin to explain how you 'should'. === Editing the CIB Using XML === The most powerful tool for modifying the configuration is the +cibadmin+ command. With +cibadmin+, you can query, add, remove, update or replace any part of the configuration. All changes take effect immediately, so there is no need to perform a reload-like operation. The simplest way of using `cibadmin` is to use it to save the current configuration to a temporary file, edit that file with your favorite text or XML editor, and then upload the revised configuration. footnote:[This process might appear to risk overwriting changes that happen after the initial cibadmin call, but pacemaker will reject any update that is "too old". If the CIB is updated in some other fashion after the initial cibadmin, the second cibadmin will be rejected because the version number will be too low.] .Safely using an editor to modify the cluster configuration ====== -------- # cibadmin --query > tmp.xml # vi tmp.xml # cibadmin --replace --xml-file tmp.xml -------- ====== Some of the better XML editors can make use of a Relax NG schema to help make sure any changes you make are valid. The schema describing the configuration can be found in +pacemaker.rng+, which may be deployed in a location such as +/usr/share/pacemaker+ or +/usr/lib/heartbeat+ depending on your operating system and how you installed the software. If you want to modify just one section of the configuration, you can query and replace just that section to avoid modifying any others. .Safely using an editor to modify only the resources section ====== -------- # cibadmin --query --scope resources > tmp.xml # vi tmp.xml # cibadmin --replace --scope resources --xml-file tmp.xml -------- ====== === Quickly Deleting Part of the Configuration === Identify the object you wish to delete by XML tag and id. 
For example, you might search the CIB for all STONITH-related configuration:

.Searching for STONITH-related configuration items
======
----
# cibadmin -Q | grep stonith
----
======

If you wanted to delete the +primitive+ tag with id +child_DoFencing+, you
would run:

----
# cibadmin --delete --xml-text '<primitive id="child_DoFencing"/>'
----

=== Updating the Configuration Without Using XML ===

Most tasks can be performed with one of the other command-line tools provided
with pacemaker, avoiding the need to read or edit XML.

To enable STONITH for example, one could run:

----
# crm_attribute --name stonith-enabled --update 1
----

Or, to check whether *somenode* is allowed to run resources, there is:

----
# crm_standby --query --node somenode
----

Or, to find the current location of *my-test-rsc*, one can use:

----
# crm_resource --locate --resource my-test-rsc
----

Examples of using these tools for specific cases will be given throughout this
document where appropriate.

[[s-config-sandboxes]]
== Making Configuration Changes in a Sandbox ==

Often it is desirable to preview the effects of a series of changes before
updating the configuration all at once. For this purpose, we have created
`crm_shadow` which creates a "shadow" copy of the configuration and arranges
for all the command line tools to use it.

To begin, simply invoke `crm_shadow --create` with the name of a configuration
to create footnote:[Shadow copies are identified with a name, making it
possible to have more than one.], and follow the simple on-screen
instructions.

[WARNING]
====
Read this section and the on-screen instructions carefully; failure to do so
could result in destroying the cluster's active configuration!
====

.Creating and displaying the active sandbox
======
----
# crm_shadow --create test
Setting up shadow instance
Type Ctrl-D to exit the crm_shadow shell
shadow[test]:
shadow[test] # crm_shadow --which
test
----
======

From this point on, all cluster commands will automatically use the shadow
copy instead of talking to the cluster's active configuration. Once you have
finished experimenting, you can either make the changes active via the
`--commit` option, or discard them using the `--delete` option. Again, be
sure to follow the on-screen instructions carefully!

For a full list of `crm_shadow` options and commands, invoke it with the
`--help` option.

.Use sandbox to make multiple changes all at once, discard them, and verify real configuration is untouched
======
----
shadow[test] # crm_failcount -r rsc_c001n01 -G
scope=status name=fail-count-rsc_c001n01 value=0
shadow[test] # crm_standby --node c001n02 -v on
shadow[test] # crm_standby --node c001n02 -G
scope=nodes name=standby value=on
shadow[test] # cibadmin --erase --force
shadow[test] # cibadmin --query
shadow[test] # crm_shadow --delete test --force
Now type Ctrl-D to exit the crm_shadow shell
shadow[test] # exit
# crm_shadow --which
No active shadow configuration defined
# cibadmin -Q
----
======

[[s-config-testing-changes]]
== Testing Your Configuration Changes ==

We saw previously how to make a series of changes to a "shadow" copy of the
configuration. Before loading the changes back into the cluster (e.g.
`crm_shadow --commit mytest --force`), it is often advisable to simulate the
effect of the changes with +crm_simulate+. For example:

----
# crm_simulate --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot
----

This tool uses the same library as the live cluster to show what it would have
done given the supplied input.
Its output, in addition to a significant amount of logging, is stored in two
files +tmp.graph+ and +tmp.dot+. Both files are representations of the same
thing: the cluster's response to your changes. The graph file stores the
complete transition from the existing cluster state to your desired new state,
containing a list of all the actions, their parameters and their
prerequisites.

Because the transition graph is not terribly easy to read, the tool also
generates a Graphviz footnote:[Graph visualization software. See
http://www.graphviz.org/ for details.] dot-file representing the same
information.

For information on the options supported by `crm_simulate`, use its `--help`
option.

.Interpreting the Graphviz output
* Arrows indicate ordering dependencies
* Dashed arrows indicate dependencies that are not present in the transition
  graph
* Actions with a dashed border of any color do not form part of the transition
  graph
* Actions with a green border form part of the transition graph
* Actions with a red border are ones the cluster would like to execute but
  cannot run
* Actions with a blue border are ones the cluster does not feel need to be
  executed
* Actions with orange text are pseudo/pretend actions that the cluster uses to
  simplify the graph
* Actions with black text are sent to the LRM
* Resource actions have text of the form
  pass:[<rsc>]_pass:[<action>]_pass:[<interval>] pass:[<node>]
* Any action depending on an action with a red border will not be able to
  execute.
* Loops are _really_ bad. Please report them to the development team.

=== Small Cluster Transition ===

image::images/Policy-Engine-small.png["An example transition graph as represented by Graphviz",width="16cm",height="6cm",align="center"]

In the above example, it appears that a new node, *pcmk-2*, has come online
and that the cluster is checking to make sure *rsc1*, *rsc2* and *rsc3* are
not already running there (indicated by the *rscN_monitor_0* entries). Once it
did that, and assuming the resources were not active there, it would have
liked to stop *rsc1* and *rsc2* on *pcmk-1* and move them to *pcmk-2*.
However, there appears to be some problem, and the cluster cannot or is not
permitted to perform the stop actions, which implies it also cannot perform
the start actions. For some reason, the cluster does not want to start *rsc3*
anywhere.

=== Complex Cluster Transition ===

image::images/Policy-Engine-big.png["Another, slightly more complex, transition graph that you're not expected to be able to read",width="16cm",height="20cm",align="center"]

== Do I Need to Update the Configuration on All Cluster Nodes? ==

No. Any changes are immediately synchronized to the other active members of
the cluster.

To reduce bandwidth, the cluster only broadcasts the incremental updates that
result from your changes and uses MD5 checksums to ensure that each copy is
completely consistent.
+
+== Working with CIB Properties ==
+
+Although the CIB's top-level properties (the attributes of the +cib+
+element, such as +epoch+ and +num_updates+) can be written to by the user,
+in most cases the cluster will overwrite any values specified by the
+user with the "correct" ones.
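+
+You can check the current values of these properties by looking at the
+top-level +cib+ element of a CIB query, for example (a quick sketch; the
+exact attributes shown will vary from cluster to cluster):
+
+----
+# cibadmin --query | head -n 1
+----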
+
+To change the ones that can be specified by the user,
+for example +admin_epoch+, one should use:
+----
+# cibadmin --modify --xml-text '<cib admin_epoch="42"/>'
+----
+
+A complete set of CIB properties will look something like this:
+
+.Attributes set for a cib object
+======
+[source,XML]
+-------
+<cib crm_feature_set="3.0.7" validate-with="pacemaker-1.2" admin_epoch="42" epoch="116" num_updates="1" cib-last-written="Tue Jan 12 15:46:39 2016" update-origin="rhel7-1" update-client="crm_attribute" have-quorum="1" dc-uuid="1">
+-------
+======
+
+== Querying and Setting Cluster Options ==
+
+indexterm:[Querying,Cluster Option]
+indexterm:[Setting,Cluster Option]
+indexterm:[Cluster,Querying Options]
+indexterm:[Cluster,Setting Options]
+
+Cluster options can be queried and modified using the `crm_attribute` tool. To
+get the current value of +cluster-delay+, you can run:
+
+----
+# crm_attribute --query --name cluster-delay
+----
+
+which is more simply written as
+
+----
+# crm_attribute -G -n cluster-delay
+----
+
+If a value is found, you'll see a result like this:
+
+----
+# crm_attribute -G -n cluster-delay
+scope=crm_config name=cluster-delay value=60s
+----
+
+If no value is found, the tool will display an error:
+
+----
+# crm_attribute -G -n clusta-deway
+scope=crm_config name=clusta-deway value=(null)
+Error performing operation: No such device or address
+----
+
+To use a different value (for example, 30 seconds), simply run:
+
+----
+# crm_attribute --name cluster-delay --update 30s
+----
+
+To go back to the cluster's default value, you can delete the value, for example:
+
+----
+# crm_attribute --name cluster-delay --delete
+Deleted crm_config option: id=cib-bootstrap-options-cluster-delay name=cluster-delay
+----
+
+=== When Options are Listed More Than Once ===
+
+If you ever see something like the following, it means that the option you're
+modifying is present more than once.
+
+.Deleting an option that is listed twice
+=======
+------
+# crm_attribute --name batch-limit --delete
+
+Multiple attributes match name=batch-limit in crm_config:
+Value: 50 (set=cib-bootstrap-options, id=cib-bootstrap-options-batch-limit)
+Value: 100 (set=custom, id=custom-batch-limit)
+Please choose from one of the matches above and supply the 'id' with --id
+------
+=======
+
+In such cases, follow the on-screen instructions to perform the
+requested action. To determine which value is currently being used by
+the cluster, refer to the 'Rules' chapter of 'Pacemaker Explained'.
+
+[[s-remote-connection]]
+== Connecting from a Remote Machine ==
+indexterm:[Cluster,Remote connection]
+indexterm:[Cluster,Remote administration]
+
+Provided Pacemaker is installed on a machine, it is possible to
+connect to the cluster even if the machine itself is not in the same
+cluster. To do this, one simply sets up a number of environment
+variables and runs the same commands as when working on a cluster
+node.
+
+.Environment Variables Used to Connect to Remote Instances of the CIB
+[width="95%",cols="1m,1,3<",options="header",align="center"]
+|=========================================================
+
+|Environment Variable
+|Default
+|Description
+
+|CIB_user
+|$USER
+|The user to connect as. Needs to be part of the +haclient+ group on
+ the target host.
+ indexterm:[Environment Variable,CIB_user]
+
+|CIB_passwd
+|
+|The user's password. Read from the command line if unset.
+ indexterm:[Environment Variable,CIB_passwd]
+
+|CIB_server
+|localhost
+|The host to contact
+ indexterm:[Environment Variable,CIB_server]
+
+|CIB_port
+|
+|The port on which to contact the server; required.
+ indexterm:[Environment Variable,CIB_port] + +|CIB_encrypted +|TRUE +|Whether to encrypt network traffic + indexterm:[Environment Variable,CIB_encrypted] + +|========================================================= + +So, if *c001n01* is an active cluster node and is listening on port 1234 +for connections, and *someuser* is a member of the *haclient* group, +then the following would prompt for *someuser*'s password and return +the cluster's current configuration: + +---- +# export CIB_port=1234; export CIB_server=c001n01; export CIB_user=someuser; +# cibadmin -Q +---- + +For security reasons, the cluster does not listen for remote +connections by default. If you wish to allow remote access, you need +to set the +remote-tls-port+ (encrypted) or +remote-clear-port+ +(unencrypted) CIB properties (i.e., those kept in the +cib+ tag, like ++num_updates+ and +epoch+). + +.Extra top-level CIB properties for remote access +[width="95%",cols="1m,1,3<",options="header",align="center"] +|========================================================= + +|Field +|Default +|Description + +|remote-tls-port +|_none_ +|Listen for encrypted remote connections on this port. + indexterm:[remote-tls-port,Remote Connection Option] + indexterm:[Remote Connection,Option,remote-tls-port] + +|remote-clear-port +|_none_ +|Listen for plaintext remote connections on this port. + indexterm:[remote-clear-port,Remote Connection Option] + indexterm:[Remote Connection,Option,remote-clear-port] + +|========================================================= + diff --git a/doc/Pacemaker_Explained/en-US/Ap-Install.txt b/doc/Pacemaker_Administration/en-US/Ch-Installing.txt similarity index 88% rename from doc/Pacemaker_Explained/en-US/Ap-Install.txt rename to doc/Pacemaker_Administration/en-US/Ch-Installing.txt index 7de68077cf..dd227b32d8 100644 --- a/doc/Pacemaker_Explained/en-US/Ap-Install.txt +++ b/doc/Pacemaker_Administration/en-US/Ch-Installing.txt @@ -1,107 +1,104 @@ -[appendix] += Installing Cluster Software = -== Installing == - -=== Installing the Software === +== Installing the Software == Most major Linux distributions have pacemaker packages in their standard package repositories, or the software can be built from source code. See the http://clusterlabs.org/wiki/Install[Install wiki page] for details. -=== Enabling Pacemaker === +== Enabling Pacemaker == -==== Enabling Pacemaker For Corosync version 2 and greater ==== +=== Enabling Pacemaker For Corosync version 2 and greater === High-level cluster management tools are available that can configure corosync for you. This document focuses on the lower-level details if you want to configure corosync yourself. Corosync configuration is normally located in +/etc/corosync/corosync.conf+. 
.Corosync configuration file for two nodes *myhost1* and *myhost2* ==== ---- totem { version: 2 secauth: off cluster_name: mycluster transport: udpu } nodelist { node { ring0_addr: myhost1 nodeid: 1 } node { ring0_addr: myhost2 nodeid: 2 } } quorum { provider: corosync_votequorum two_node: 1 } logging { to_syslog: yes } ---- ==== .Corosync configuration file for three nodes *myhost1*, *myhost2* and *myhost3* ==== ---- totem { version: 2 secauth: off cluster_name: mycluster transport: udpu } nodelist { node { ring0_addr: myhost1 nodeid: 1 } node { ring0_addr: myhost2 nodeid: 2 } node { ring0_addr: myhost3 nodeid: 3 } } quorum { provider: corosync_votequorum } logging { to_syslog: yes } ---- ==== In the above examples, the +totem+ section defines what protocol version and options (including encryption) to use, footnote:[ Please consult the Corosync website (http://www.corosync.org/) and documentation for details on enabling encryption and peer authentication for the cluster. ] and gives the cluster a unique name (+mycluster+ in these examples). -The +node+ section lists the nodes in this cluster. (See <> -for how this affects pacemaker.) +The +node+ section lists the nodes in this cluster. The +quorum+ section defines how the cluster uses quorum. The important thing is that two-node clusters must be handled specially, so +two_node: 1+ must be defined for two-node clusters (and only for two-node clusters). The +logging+ section should be self-explanatory. diff --git a/doc/Pacemaker_Administration/en-US/Ch-Intro.txt b/doc/Pacemaker_Administration/en-US/Ch-Intro.txt new file mode 100644 index 0000000000..60b750761c --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Ch-Intro.txt @@ -0,0 +1,19 @@ += Read-Me-First = + +== The Scope of this Document == + +The purpose of this document is to help system administrators learn how to +manage a Pacemaker cluster. + +System administrators may be interested in other parts of the +https://www.clusterlabs.org/pacemaker/doc/[Pacemaker documentation set], +such as 'Clusters from Scratch', a step-by-step guide to setting up an +example cluster, and 'Pacemaker Explained', an exhaustive reference for +cluster configuration. + +Multiple higher-level tools (both command-line and GUI) are available to +simplify cluster management. However, this document focuses on the lower-level +command-line tools that come with Pacemaker itself. The concepts are applicable +to the higher-level tools, though the syntax would differ. + +include::../../shared/en-US/pacemaker-intro.txt[] diff --git a/doc/Pacemaker_Administration/en-US/Ch-Monitoring.txt b/doc/Pacemaker_Administration/en-US/Ch-Monitoring.txt new file mode 100644 index 0000000000..b9edabae2a --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Ch-Monitoring.txt @@ -0,0 +1,60 @@ += Monitoring a Pacemaker Cluster = + +== Using crm_mon == + +The `crm_mon` utility displays the current state of an active cluster. It can +show the cluster status organized by node or by resource, and can be used in +either single-shot or dynamically updating mode. It can also display operations +performed and information about failures. + +Using this tool, you can examine the state of the cluster for irregularities, +and see how it responds when you cause or simulate failures. + +See the manual page or the output of `crm_mon --help` for a full description of +its many options. 
+
+.Sample output from crm_mon -1
+======
+-------
+Stack: corosync
+Current DC: node2 (version 2.0.0-1) - partition with quorum
+Last updated: Mon Jan 29 12:18:42 2018
+Last change: Mon Jan 29 12:18:40 2018 by root via crm_attribute on node3
+
+5 nodes configured
+2 resources configured
+
+Online: [ node1 node2 node3 node4 node5 ]
+
+Active resources:
+
+Fencing (stonith:fence_xvm): Started node1
+IP (ocf:heartbeat:IPaddr2): Started node2
+-------
+======
+
+.Sample output from crm_mon -n -1
+======
+-------
+Stack: corosync
+Current DC: node2 (version 2.0.0-1) - partition with quorum
+Last updated: Mon Jan 29 12:21:48 2018
+Last change: Mon Jan 29 12:18:40 2018 by root via crm_attribute on node3
+
+5 nodes configured
+2 resources configured
+
+Node node1: online
+ Fencing (stonith:fence_xvm): Started
+Node node2: online
+ IP (ocf:heartbeat:IPaddr2): Started
+Node node3: online
+Node node4: online
+Node node5: online
+-------
+======
+
+As mentioned in an earlier chapter, the DC is the node where decisions are
+made. The cluster elects a node to be DC as needed. The only significance of
+the choice of DC to an administrator is the fact that its logs will have the
+most information about why decisions were made.
diff --git a/doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt b/doc/Pacemaker_Administration/en-US/Ch-Upgrading.txt
similarity index 93%
rename from doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt
rename to doc/Pacemaker_Administration/en-US/Ch-Upgrading.txt
index 570410b520..a124a06e4a 100644
--- a/doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt
+++ b/doc/Pacemaker_Administration/en-US/Ch-Upgrading.txt
@@ -1,445 +1,441 @@
-[appendix]
+= Upgrading a Pacemaker Cluster =
-== Upgrading ==
-
-[[ap-upgrade]]
-=== Pacemaker Versioning ===
+== Pacemaker Versioning ==

Pacemaker has an overall release version, plus separate version numbers for
certain internal components.

* *Pacemaker release version:* This version consists of three numbers
  (_x.y.z_).
+
The major version number (the _x_ in _x.y.z_) increases when at least some
rolling upgrades are not possible from the previous major version. For
example, a rolling upgrade from 1.0.8 to 1.1.15 should always be supported,
but a rolling upgrade from 1.0.8 to 2.0.0 may not be possible.
+
The minor version (the _y_ in _x.y.z_) increases when there are significant
changes in cluster default behavior, tool behavior, and/or the API interface
(for software that utilizes Pacemaker libraries). The main benefit is to
alert you to pay closer attention to the release notes, to see if you might
be affected.
+
The release counter (the _z_ in _x.y.z_) is increased with all public releases
of Pacemaker, which typically include both bug fixes and new features.

* *CRM feature set:* This version number applies to the communication between
  full cluster nodes.
+
It increases when a cluster node running the older version would have
problems if the cluster's Designated Controller (DC) has the newer version.
To avoid these problems, Pacemaker ensures that the longest-running node is
the DC, and that nodes with an older feature set cannot join the cluster.

* *LRMD protocol version:* This version applies to communication between a
  Pacemaker Remote node and the cluster. It increases when an older cluster
  node would have problems hosting the connection to a newer Pacemaker Remote
  node. To avoid these problems, Pacemaker Remote nodes will accept
  connections only from cluster nodes with the same or newer LRMD protocol
  version.
+ Unlike with CRM feature set differences between full cluster nodes, mixed LRMD protocol versions between Pacemaker Remote nodes and full cluster nodes are fine, as long as the Pacemaker Remote nodes have the older version. This can be useful, for example, to host a legacy application in an older operating system version used as a Pacemaker Remote node. * *XML schema version:* Pacemaker’s configuration syntax — what's allowed in the Configuration Information Base (CIB) — has its own version. This allows the configuration syntax to evolve over time while still allowing clusters with older configurations to work without change. -=== Upgrading Cluster Software === +== Upgrading Cluster Software == There are three approaches to upgrading a cluster, each with advantages and disadvantages. .Upgrade Methods [width="95%",cols="s,6*",options="header",align="center"] |========================================================= |Method |Available between all versions |Can be used with Pacemaker Remote nodes |Service outage during upgrade |Service recovery during upgrade |Exercises failover logic |Allows change of messaging layer indexterm:[Cluster,switching between stacks] indexterm:[Changing cluster stack] footnote:[Currently, Corosync version 2 and greater is the only supported cluster stack, but other stacks have been supported by past versions, and may be supported by future versions.] |Complete cluster shutdown indexterm:[upgrade,shutdown] indexterm:[shutdown upgrade] |yes |yes |always |N/A |no |yes |Rolling (node by node) indexterm:[upgrade,rolling] indexterm:[rolling upgrade] |no |yes |always footnote:[Any active resources will be moved off the node being upgraded, so there will be at least a brief outage unless all resources can be migrated "live".] |yes |yes |no |Detach and reattach indexterm:[upgrade,reattach] indexterm:[reattach upgrade] |yes |no |only due to failure |no |no |yes |========================================================= -==== Complete Cluster Shutdown ==== +=== Complete Cluster Shutdown === In this scenario, one shuts down all cluster nodes and resources, then upgrades all the nodes before restarting the cluster. . On each node: .. Shutdown the cluster software (pacemaker and the messaging layer). .. Upgrade the Pacemaker software. This may also include upgrading the messaging layer and/or the underlying operating system. .. Check the configuration with the `crm_verify` tool. . On each node: .. Start the cluster software. Currently, only Corosync version 2 and greater is supported as the cluster layer, but if another stack is supported in the future, the stack does not need to be the same one before the upgrade. One variation of this approach is to build a new cluster on new hosts. This allows the new version to be tested beforehand, and minimizes downtime by having the new nodes ready to be placed in production as soon as the old nodes are shut down. -==== Rolling (node by node) ==== +=== Rolling (node by node) === In this scenario, each node is removed from the cluster, upgraded, and then brought back online, until all nodes are running the newest version. Special considerations when planning a rolling upgrade: * If you plan to upgrade other cluster software -- such as the messaging layer -- at the same time, consult that software's documentation for its compatibility with a rolling upgrade. * If the major version number is changing in the Pacemaker version you are upgrading to, a rolling upgrade may not be possible. 
Read the new version's release notes (as well as the information here) for
what limitations may exist.

* If the CRM feature set is changing in the Pacemaker version you are
  upgrading to, you should run a mixed-version cluster only during a small
  rolling upgrade window. If one of the older nodes drops out of the cluster
  for any reason, it will not be able to rejoin until it is upgraded.

* If the LRMD protocol version is changing, all cluster nodes should be
  upgraded before upgrading any Pacemaker Remote nodes.

See the ClusterLabs wiki's
http://clusterlabs.org/wiki/ReleaseCalendar[Release Calendar] to figure out
whether the CRM feature set and/or LRMD protocol version changed between the
Pacemaker release versions in your rolling upgrade.

To perform a rolling upgrade, on each node in turn:

. Put the node into standby mode, and wait for any active resources to be
  moved cleanly to another node. (This step is optional, but allows you to
  deal with any resource issues before the upgrade.)
. Shut down the cluster software (pacemaker and the messaging layer) on the
  node.
. Upgrade the Pacemaker software. This may also include upgrading the
  messaging layer and/or the underlying operating system.
. If this is the first node to be upgraded, check the configuration with the
  `crm_verify` tool.
. Start the messaging layer. This must be the same messaging layer (currently
  only Corosync version 2 and greater is supported) that the rest of the
  cluster is using.

[NOTE]
====
Even if a rolling upgrade from the current version of the cluster to the
newest version is not directly possible, it may be possible to perform a
rolling upgrade in multiple steps, by upgrading to an intermediate version
first.

.Version Compatibility Table
[width="95%",cols="2*",options="header",align="center"]
|=========================================================

|Version being Installed
|Oldest Compatible Version

|Pacemaker 2.y.z
|Pacemaker 1.1.11
 footnote:[Rolling upgrades from Pacemaker 1.1.z to 2.y.z are possible only
 if the cluster uses corosync version 2 or greater as its messaging layer,
 and the Cluster Information Base (CIB) uses schema 1.0 or higher in its
 validate-with property.]

|Pacemaker 1.y.z
|Pacemaker 1.0.0

|Pacemaker 0.7.z
|Pacemaker 0.6.z

|=========================================================
====

-==== Detach and Reattach ====
+=== Detach and Reattach ===

The reattach method is a variant of a complete cluster shutdown, where the
resources are left active and get re-detected when the cluster is restarted.

This method may not be used if the cluster contains any Pacemaker Remote
nodes.

. Tell the cluster to stop managing services. This is required to allow the
  services to remain active after the cluster shuts down.
+
----
# crm_attribute --name maintenance-mode --update true
----
. On each node, shut down the cluster software (pacemaker and the messaging
  layer), and upgrade the Pacemaker software. This may also include upgrading
  the messaging layer. While the underlying operating system may be upgraded
  at the same time, that will be more likely to cause outages in the detached
  services (certainly, if a reboot is required).
. Check the configuration with the `crm_verify` tool.
. On each node, start the cluster software. Currently, only Corosync version
  2 and greater is supported as the cluster layer, but if another stack is
  supported in the future, the stack does not need to be the same one as
  before the upgrade.
. Verify that the cluster re-detected all resources correctly.
.
Allow the cluster to resume managing resources again: + ---- # crm_attribute --name maintenance-mode --delete ---- -=== Upgrading the Configuration === +== Upgrading the Configuration == indexterm:[upgrade,Configuration] indexterm:[Configuration,upgrading] The CIB schema version can change from one Pacemaker version to another. After cluster software is upgraded, the cluster will continue to use the older schema version that it was previously using. This can be useful, for example, when administrators have written tools that modify the configuration, and are based on the older syntax. footnote:[As of Pacemaker 2.0.0, only schema versions pacemaker-1.0 and higher are supported (excluding pacemaker-1.1, which was an experimental schema now known as pacemaker-next).] However, when using an older syntax, new features may be unavailable, and there is a performance impact, since the cluster must do a non-persistent configuration upgrade before each transition. So while using the old syntax is possible, it is not advisable to continue using it indefinitely. Even if you wish to continue using the old syntax, it is a good idea to follow the upgrade procedure outlined below, except for the last step, to ensure that the new software has no problems with your existing configuration (since it will perform much the same task internally). If you are brave, it is sufficient simply to run `cibadmin --upgrade`. A more cautious approach would proceed like this: . Create a shadow copy of the configuration. The later commands will automatically operate on this copy, rather than the live configuration. + ----- # crm_shadow --create shadow ----- . Verify the configuration is valid with the new software (which may be stricter about syntax mistakes, or may have dropped support for deprecated features): indexterm:[Configuration,verify] indexterm:[verify,Configuration] + ----- # crm_verify --live-check ----- . Fix any errors or warnings. . Perform the upgrade: + ----- # cibadmin --upgrade ----- . If this step fails, there are three main possibilities: .. The configuration was not valid to start with (did you do steps 2 and 3?). .. The transformation failed - http://bugs.clusterlabs.org/[report a bug] or mailto:users@clusterlabs.org?subject=Transformation%20failed%20during%20upgrade[email the project]. .. The transformation was successful but produced an invalid result. + If the result of the transformation is invalid, you may see a number of errors from the validation library. If these are not helpful, visit the http://clusterlabs.org/wiki/Validation_FAQ[Validation FAQ wiki page] and/or try the manual upgrade procedure described below. + . Check the changes: + ----- # crm_shadow --diff ----- + If at this point there is anything about the upgrade that you wish to fine-tune (for example, to change some of the automatic IDs), now is the time to do so: + ----- # crm_shadow --edit ----- + This will open the configuration in your favorite editor (whichever is specified by the standard *$EDITOR* environment variable). + . Preview how the cluster will react: + ------ # crm_simulate --live-check --save-dotfile shadow.dot -S # graphviz shadow.dot ------ + Verify that either no resource actions will occur or that you are happy with any that are scheduled. If the output contains actions you do not expect (possibly due to changes to the score calculations), you may need to make further manual changes. See <> for further details on how to interpret the output of `crm_simulate` and `graphviz`. + . 
Upload the changes: + ----- # crm_shadow --commit shadow --force ----- + In the unlikely event this step fails, please report a bug. [NOTE] ==== indexterm:[Configuration,upgrade manually] It is also possible to perform the configuration upgrade steps manually: . Locate the +upgrade*.xsl+ conversion scripts provided with the source code. These will often be installed in a location such as +/usr/share/pacemaker+, or may be obtained from the https://github.com/ClusterLabs/pacemaker/tree/master/xml[source repository]. . Run the conversion scripts that apply to your older version, for example: indexterm:[XML,convert] + ----- # xsltproc /path/to/upgrade06.xsl config06.xml > config10.xml ----- + . Locate the +pacemaker.rng+ script (from the same location as the xsl files). . Check the XML validity: indexterm:[validate configuration]indexterm:[Configuration,validate XML] + ---- # xmllint --relaxng /path/to/pacemaker.rng config10.xml ---- The advantage of this method is that it can be performed without the cluster running, and any validation errors are often more informative. ==== -=== What Changed in 2.0 === +== What Changed in 2.0 == The main goal of the 2.0 release was to remove support for deprecated syntax, along with some small changes in default configuration behavior and tool behavior. Highlights: * Only Corosync version 2 and greater is now supported as the underlying cluster layer. Support for Heartbeat and Corosync 1 (including CMAN) is removed. * The Pacemaker detail log file is now stored in /var/log/pacemaker/pacemaker.log by default. * The record-pending cluster property now defaults to true, which allows status tools such as crm_mon to show operations that are in progress. * Support for a number of deprecated build options, environment variables, and configuration settings has been removed. * The public API for Pacemaker libraries that software applications can use has changed significantly. For a detailed list of changes, see the release notes and the https://wiki.clusterlabs.org/wiki/Pacemaker_2.0_Changes[Pacemaker 2.0 Changes] page on the ClusterLabs wiki. -=== What Changed in 1.0 === +== What Changed in 1.0 == -==== New ==== +=== New === -* Failure timeouts. See <> -* New section for resource and operation defaults. See <> and <> -* Tool for making offline configuration changes. See <> -* +Rules, instance_attributes, meta_attributes+ and sets of operations can be defined once and referenced in multiple places. See <> +* Failure timeouts. +* New section for resource and operation defaults. +* Tool for making offline configuration changes. +* +Rules, instance_attributes, meta_attributes+ and sets of operations can be defined once and referenced in multiple places. * The CIB now accepts XPath-based create/modify/delete operations. See the pass:[cibadmin] help text. -* Multi-dimensional colocation and ordering constraints. See <> and <> -* The ability to connect to the CIB from non-cluster machines. See <> -* Allow recurring actions to be triggered at known times. See <> +* Multi-dimensional colocation and ordering constraints. +* The ability to connect to the CIB from non-cluster machines. +* Allow recurring actions to be triggered at known times. 
-==== Changed ==== +=== Changed === * Syntax ** All resource and cluster options now use dashes (-) instead of underscores (_) ** +master_slave+ was renamed to +master+ ** The +attributes+ container tag was removed ** The operation field +pre-req+ has been renamed +requires+ ** All operations must have an +interval+, +start+/+stop+ must have it set to zero * The +stonith-enabled+ option now defaults to true. * The cluster will refuse to start resources if +stonith-enabled+ is true (or unset) and no STONITH resources have been defined -* The attributes of colocation and ordering constraints were renamed for clarity. See <> and <> -* +resource-failure-stickiness+ has been replaced by +migration-threshold+. See <> +* The attributes of colocation and ordering constraints were renamed for clarity. +* +resource-failure-stickiness+ has been replaced by +migration-threshold+. * The parameters for command-line tools have been made consistent * Switched to 'RelaxNG' schema validation and 'libxml2' parser ** id fields are now XML IDs which have the following limitations: *** id's cannot contain colons (:) *** id's cannot begin with a number *** id's must be globally unique (not just unique for that tag) ** Some fields (such as those in constraints that refer to resources) are IDREFs. + This means that they must reference existing resources or objects in order for the configuration to be valid. Removing an object which is referenced elsewhere will therefore fail. + ** The CIB representation, from which a MD5 digest is calculated to verify CIBs on the nodes, has changed. + This means that every CIB update will require a full refresh on any upgraded nodes until the cluster is fully upgraded to 1.0. This will result in significant performance degradation and it is therefore highly inadvisable to run a mixed 1.0/0.6 cluster for any longer than absolutely necessary. + * Ping node information no longer needs to be added to _ha.cf_. + Simply include the lists of hosts in your ping resource(s). -==== Removed ==== +=== Removed === * Syntax ** It is no longer possible to set resource meta options as top-level attributes. Use meta attributes instead. ** Resource and operation defaults are no longer read from - +crm_config+. See <> and - <> instead. + +crm_config+. 
diff --git a/doc/Pacemaker_Administration/en-US/Pacemaker_Administration.ent b/doc/Pacemaker_Administration/en-US/Pacemaker_Administration.ent new file mode 100644 index 0000000000..d9e752b636 --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Pacemaker_Administration.ent @@ -0,0 +1,4 @@ + + + + diff --git a/doc/Pacemaker_Administration/en-US/Pacemaker_Administration.xml b/doc/Pacemaker_Administration/en-US/Pacemaker_Administration.xml new file mode 100644 index 0000000000..07a6b77ddc --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Pacemaker_Administration.xml @@ -0,0 +1,18 @@ + + +%BOOK_ENTITIES; +]> + + + + + + + + + + + + + diff --git a/doc/Pacemaker_Administration/en-US/Preface.xml b/doc/Pacemaker_Administration/en-US/Preface.xml new file mode 100644 index 0000000000..e63c4be5b2 --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Preface.xml @@ -0,0 +1,11 @@ + + +Preface + + + + + + + diff --git a/doc/Pacemaker_Administration/en-US/Revision_History.xml b/doc/Pacemaker_Administration/en-US/Revision_History.xml new file mode 100644 index 0000000000..56d3c70687 --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/Revision_History.xml @@ -0,0 +1,28 @@ + + +%BOOK_ENTITIES; +]> + + + Revision History + + + + + 1-0 + Tue Jan 23 2018 + + KenGaillot + kgaillot@redhat.com + + + Move administration-oriented information from + Pacemaker Explained into its own + book + + + + + + diff --git a/doc/Pacemaker_Administration/en-US/images b/doc/Pacemaker_Administration/en-US/images new file mode 120000 index 0000000000..963300dd95 --- /dev/null +++ b/doc/Pacemaker_Administration/en-US/images @@ -0,0 +1 @@ +../../shared/en-US/images \ No newline at end of file diff --git a/doc/Pacemaker_Administration/publican.cfg.in b/doc/Pacemaker_Administration/publican.cfg.in new file mode 100644 index 0000000000..c889e16a46 --- /dev/null +++ b/doc/Pacemaker_Administration/publican.cfg.in @@ -0,0 +1,11 @@ +docname: Pacemaker_Administration +xml_lang: en-US +#edition: 1 +type: Book +version: @PACKAGE_SERIES@ +brand: @PUBLICAN_BRAND@ +product: Pacemaker + +chunk_first: 0 +chunk_section_depth: 3 +generate_section_toc_level: 4 diff --git a/doc/Pacemaker_Development/en-US/Ch-Coding.txt b/doc/Pacemaker_Development/en-US/Ch-Coding.txt index 188830e2df..ecb228ae39 100644 --- a/doc/Pacemaker_Development/en-US/Ch-Coding.txt +++ b/doc/Pacemaker_Development/en-US/Ch-Coding.txt @@ -1,192 +1,198 @@ = C Coding Guidelines = //// We prefer [[ch-NAME]], but older versions of asciidoc don't deal well with that construct for chapter headings //// anchor:ch-c-coding[Chapter 2, C Coding Guidelines] == C Boilerplate == indexterm:[C,boilerplate] indexterm:[licensing,C boilerplate] Every C file should start like this: ==== [source,C] ---- /* - * Copyright (C) Andrew Beekhof + * Copyright Andrew Beekhof * * This source code is licensed under WITHOUT ANY WARRANTY. */ ---- ==== -++ is the year the code was 'originally' created (it is the most -important date for copyright purposes, as it establishes priority and -the point from which expiration is calculated). If the code is modified -in later years, add +-YYYY+ with the most recent year of modification. +++ is the year the code was 'originally' created. +footnote:[ +See the U.S. Copyright Office's https://www.copyright.gov/comp3/["Compendium +of U.S. Copyright Office Practices"], particularly "Chapter 2200: Notice of +Copyright", sections 2205.1(A) and 2205.1(F), or +https://techwhirl.com/updating-copyright-notices/["Updating Copyright +Notices"] for a more readable summary. 
+] +If the code is modified in later years, add +-YYYY+ with the most recent year +of modification. ++ should follow the policy set forth in the https://github.com/ClusterLabs/pacemaker/blob/master/COPYING[+COPYING+] file, generally one of "GNU General Public License version 2 or later (GPLv2+)" or "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)". == Formatting == === Whitespace === indexterm:[C,whitespace] - Indentation must be 4 spaces, no tabs. - Do not leave trailing whitespace. === Line Length === - Lines should be no longer than 80 characters unless limiting line length significantly impacts readability. === Pointers === indexterm:[C,pointers] - The +*+ goes by the variable name, not the type: ==== [source,C] ---- char *foo; ---- ==== - Use a space before the +*+ and after the closing parenthesis in a cast: ==== [source,C] ---- char *foo = (char *) bar; ---- ==== === Functions === indexterm:[C,functions] - In the function definition, put the return type on its own line, and place the opening brace by itself on a line: ==== [source,C] ---- static int foo(void) { ---- ==== - For functions with enough arguments that they must break to the next line, align arguments with the first argument: ==== [source,C] ---- static int function_name(int bar, const char *a, const char *b, const char *c, const char *d) { ---- ==== - If a function name gets really long, start the arguments on their own line with 8 spaces of indentation: ==== [source,C] ---- static int really_really_long_function_name_this_is_getting_silly_now( int bar, const char *a, const char *b, const char *c, const char *d) { ---- ==== === Control Statements (if, else, while, for, switch) === - The keyword is followed by one space, then left parenthesis without space, condition, right parenthesis, space, opening bracket on the same line. +else+ and +else if+ are on the same line with the ending brace and opening brace, separated by a space: ==== [source,C] ---- if (condition1) { statement1; } else if (condition2) { statement2; } else { statement3; } ---- ==== - In a +switch+ statement, +case+ is indented one level, and the body of each +case+ is indented by another level. The opening brace is on the same line as +switch+. ==== [source,C] ---- switch (expression) { case 0: command1; break; case 1: command2; break; default: command3; } ---- ==== === Operators === indexterm:[C,operators] - Operators have spaces from both sides. Do not rely on operator precedence; use parentheses when mixing operators with different priority. - No space is used after opening parenthesis and before closing parenthesis. ==== [source,C] ---- x = a + b - (c * d); ---- ==== == Naming Conventions == indexterm:[C,naming] - Any exposed symbols in libraries (non-+static+ function names, type names, etc.) must begin with a prefix appropriate to the library, for example, +crm_+, +pe_+, +st_+, +lrm_+. 
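One quick way to audit a built library for accidentally exposed symbols that
lack a prefix is to list its global symbols with `nm`. This is only a sketch:
it assumes an autotools build tree where libtool has placed +libcrmcommon.so+
under +.libs+, and the prefix list should be adjusted to the library being
checked:

----
$ nm -g --defined-only lib/common/.libs/libcrmcommon.so \
      | awk '{ print $3 }' | grep -Ev '^(crm_|pe_|st_|lrm_)'
----

Anything this prints is a candidate for renaming or for being made +static+.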
== vim Settings == indexterm:[vim] Developers who use +vim+ to edit source code can add the following settings to their +~/.vimrc+ file to follow Pacemaker C coding guidelines: ---- " follow Pacemaker coding guidelines when editing C source code files filetype plugin indent on au FileType c setlocal expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=80 autocmd BufNewFile,BufRead *.h set filetype=c let c_space_errors = 1 ---- diff --git a/doc/Pacemaker_Development/en-US/Ch-Python.txt b/doc/Pacemaker_Development/en-US/Ch-Python.txt index a8a7865f4f..9733bcd22e 100644 --- a/doc/Pacemaker_Development/en-US/Ch-Python.txt +++ b/doc/Pacemaker_Development/en-US/Ch-Python.txt @@ -1,138 +1,153 @@ = Python Coding Guidelines = //// We prefer [[ch-NAME]], but older versions of asciidoc don't deal well with that construct for chapter headings //// anchor:ch-python-coding[Chapter 3, Python Coding Guidelines] [[s-python-boilerplate]] == Python Boilerplate == indexterm:[Python,boilerplate] indexterm:[licensing,Python boilerplate] -Every Python file should start like this: +If a Python file is meant to be executed (as opposed to imported), it should +have a +.in+ extension, and its first line should be: +==== +---- +#!@PYTHON@ +---- +==== +which will be replaced with the appropriate python executable when Pacemaker is +built. + +After the above line if any, every Python file should start like this: ==== [source,Python] ---- -[] """ """ # Pacemaker targets compatibility with Python 2.7 and 3.2+ from __future__ import print_function, unicode_literals, absolute_import, division -__copyright__ = "Copyright (C) Andrew Beekhof " +__copyright__ = "Copyright Andrew Beekhof " __license__ = " WITHOUT ANY WARRANTY" ---- ==== If the file is meant to be directly executed, the first line (++) should be +#!/usr/bin/python+. If it is meant to be imported, omit this line. ++ is obviously a brief description of the file's purpose. The string may contain any other information typically used in a Python file https://www.python.org/dev/peps/pep-0257/[docstring]. The +import+ statement is discussed further in <>. -++ is the year the code was 'originally' created (it is the most -important date for copyright purposes, as it establishes priority and -the point from which expiration is calculated). If the code is modified -in later years, add +-YYYY+ with the most recent year of modification. +++ is the year the code was 'originally' created. +footnote:[ +See the U.S. Copyright Office's https://www.copyright.gov/comp3/["Compendium +of U.S. Copyright Office Practices"], particularly "Chapter 2200: Notice of +Copyright", sections 2205.1(A) and 2205.1(F), or +https://techwhirl.com/updating-copyright-notices/["Updating Copyright +Notices"] for a more readable summary. +] +If the code is modified in later years, add +-YYYY+ with the most recent year +of modification. ++ should follow the policy set forth in the https://github.com/ClusterLabs/pacemaker/blob/master/COPYING[+COPYING+] file, generally one of "GNU General Public License version 2 or later (GPLv2+)" or "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)". == Python Compatibility == indexterm:[Python,2] indexterm:[Python,3] indexterm:[Python,versions] Pacemaker targets compatibility with Python 2.7, and Python 3.2 and later. These versions have added features to be more compatible with each other, allowing us to support both the 2 and 3 series with the same code. It is a good idea to test any changes with both Python 2 and 3. 
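For example, one quick check that a change at least parses under both
supported series is to byte-compile the file with each interpreter before
running the test suite. This is a sketch only: +some_module.py+ is a
placeholder name, and it assumes both +python2+ and +python3+ executables are
on the +PATH+:

----
$ python2 -m py_compile some_module.py
$ python3 -m py_compile some_module.py
----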
[[s-python-future-imports]]
=== Python Future Imports ===

The future imports used in <<s-python-boilerplate>> mean:

* All print statements must use parentheses, and printing without a newline
  is accomplished with the +end=' '+ parameter rather than a trailing comma.
* All string literals will be treated as Unicode (the +u+ prefix is
  unnecessary, and must not be used, because it is not available in Python
  3.2).
* Local modules must be imported using +from . import+ (rather than just
  +import+). To import one item from a local module, use
  +from .modulename import+ (rather than +from modulename import+).
* Division using +/+ will always return a floating-point result (use +//+ if
  you want the integer floor instead).

=== Other Python Compatibility Requirements ===

* When specifying an exception variable, always use +as+ instead of a comma
  (e.g. +except Exception as e+ or +except (TypeError, IOError) as e+).
  Use +e.args+ to access the error arguments (instead of iterating over or
  subscripting +e+).
* Use +in+ (not +has_key()+) to determine if a dictionary has a particular
  key.
* Always use the I/O functions from the +io+ module rather than the native
  I/O functions (e.g. +io.open()+ rather than +open()+).
* When opening a file, always use the +t+ (text) or +b+ (binary) mode flag.
* When creating classes, always specify a parent class to ensure that it is a
  "new-style" class (e.g. +class Foo(object):+ rather than +class Foo:+).
* Be aware of the bytes type added in Python 3. Many places where strings are
  used in Python 2 use bytes or bytearrays in Python 3 (for example, the
  pipes used with +subprocess.Popen()+). Code should handle both
  possibilities.
* Be aware that the +items()+, +keys()+, and +values()+ methods of
  dictionaries return lists in Python 2 and views in Python 3. In many cases,
  no special handling is required, but if the code needs to use list methods
  on the result, cast the result to list first.
* Do not raise or catch strings as exceptions (e.g. +raise "Bad thing"+).
* Do not use the +cmp+ parameter of sorting functions (use +key+ instead, if
  needed) or the +$$__cmp__()$$+ method of classes (implement rich comparison
  methods such as +$$__lt__()$$+ instead, if needed).
* Do not use the +buffer+ type.
* Do not use features not available in all targeted Python versions. Common
  examples include:
** The +html+, +ipaddress+, and +UserDict+ modules
** The +subprocess.run()+ function
** The +subprocess.DEVNULL+ constant
** +subprocess+ module-specific exceptions

=== Python Usages to Avoid ===

Avoid the following if possible, otherwise research the compatibility issues
involved (hacky workarounds are often available):

* long integers
* octal integer literals
* mixed binary and string data in one data file or variable
* metaclasses
* +locale.strcoll+ and +locale.strxfrm+
* the +configparser+ and +ConfigParser+ modules
* importing compatibility modules such as +six+ (so we don't have to add them
  to Pacemaker's dependencies)

== Formatting Python Code ==

indexterm:[Python,formatting]

* Indentation must be 4 spaces, no tabs.
* Do not leave trailing whitespace.
* Lines should be no longer than 80 characters unless limiting line length
  significantly impacts readability. For Python, this limitation is flexible
  since breaking a line often impacts readability, but definitely keep it
  under 120 characters.
* Where not conflicting with this style guide, it is recommended (but not
  required) to follow https://www.python.org/dev/peps/pep-0008/[PEP 8].
* It is recommended (but not required) to format Python code such that `pylint --disable=line-too-long,too-many-lines,too-many-instance-attributes,too-many-arguments,too-many-statements` produces minimal complaints (even better if you don't need to disable all those checks). diff --git a/doc/Pacemaker_Development/en-US/images b/doc/Pacemaker_Development/en-US/images new file mode 120000 index 0000000000..963300dd95 --- /dev/null +++ b/doc/Pacemaker_Development/en-US/images @@ -0,0 +1 @@ +../../shared/en-US/images \ No newline at end of file diff --git a/doc/Pacemaker_Development/en-US/images/.keep b/doc/Pacemaker_Development/en-US/images/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/doc/Pacemaker_Explained/en-US/Ap-LSB.txt b/doc/Pacemaker_Explained/en-US/Ap-LSB.txt deleted file mode 100644 index 479ac55187..0000000000 --- a/doc/Pacemaker_Explained/en-US/Ap-LSB.txt +++ /dev/null @@ -1,81 +0,0 @@ -[appendix] - -[[ap-lsb]] -== Init Script LSB Compliance == - -The relevant part of the -http://refspecs.linuxfoundation.org/lsb.shtml[LSB specifications] -includes a description of all the return codes listed here. - -Assuming `some_service` is configured correctly and currently -inactive, the following sequence will help you determine if it is -LSB-compatible: - -. Start (stopped): -+ ----- -# /etc/init.d/some_service start ; echo "result: $?" ----- -+ - .. Did the service start? - .. Did the command print *result: 0* (in addition to its usual output)? -+ -. Status (running): -+ ----- -# /etc/init.d/some_service status ; echo "result: $?" ----- -+ - .. Did the script accept the command? - .. Did the script indicate the service was running? - .. Did the command print *result: 0* (in addition to its usual output)? -+ -. Start (running): -+ ----- -# /etc/init.d/some_service start ; echo "result: $?" ----- -+ - .. Is the service still running? - .. Did the command print *result: 0* (in addition to its usual output)? -+ -. Stop (running): -+ ----- -# /etc/init.d/some_service stop ; echo "result: $?" ----- -+ - .. Was the service stopped? - .. Did the command print *result: 0* (in addition to its usual output)? -+ -. Status (stopped): -+ ----- -# /etc/init.d/some_service status ; echo "result: $?" ----- -+ - .. Did the script accept the command? - .. Did the script indicate the service was not running? - .. Did the command print *result: 3* (in addition to its usual output)? -+ -. Stop (stopped): -+ ----- -# /etc/init.d/some_service stop ; echo "result: $?" ----- -+ - .. Is the service still stopped? - .. Did the command print *result: 0* (in addition to its usual output)? -+ -. Status (failed): -+ -.. This step is not readily testable and relies on manual inspection of the script. -+ -The script can use one of the error codes (other than 3) listed in the -LSB spec to indicate that it is active but failed. This tells the -cluster that before moving the resource to another node, it needs to -stop it on the existing one first. - -If the answer to any of the above questions is no, then the script is -not LSB-compliant. Your options are then to either fix the script or -write an OCF agent based on the existing script. 
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt index 2c39af1799..a1b5debbae 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt @@ -1,813 +1,728 @@ = Advanced Configuration = -[[s-remote-connection]] -== Connecting from a Remote Machine == -indexterm:[Cluster,Remote connection] -indexterm:[Cluster,Remote administration] - -Provided Pacemaker is installed on a machine, it is possible to -connect to the cluster even if the machine itself is not in the same -cluster. To do this, one simply sets up a number of environment -variables and runs the same commands as when working on a cluster -node. - -.Environment Variables Used to Connect to Remote Instances of the CIB -[width="95%",cols="1m,1,3<",options="header",align="center"] -|========================================================= - -|Environment Variable -|Default -|Description - -|CIB_user -|$USER -|The user to connect as. Needs to be part of the +haclient+ group on - the target host. - indexterm:[Environment Variable,CIB_user] - -|CIB_passwd -| -|The user's password. Read from the command line if unset. - indexterm:[Environment Variable,CIB_passwd] - -|CIB_server -|localhost -|The host to contact - indexterm:[Environment Variable,CIB_server] - -|CIB_port -| -|The port on which to contact the server; required. - indexterm:[Environment Variable,CIB_port] - -|CIB_encrypted -|TRUE -|Whether to encrypt network traffic - indexterm:[Environment Variable,CIB_encrypted] - -|========================================================= - -So, if *c001n01* is an active cluster node and is listening on port 1234 -for connections, and *someuser* is a member of the *haclient* group, -then the following would prompt for *someuser*'s password and return -the cluster's current configuration: - ----- -# export CIB_port=1234; export CIB_server=c001n01; export CIB_user=someuser; -# cibadmin -Q ----- - -For security reasons, the cluster does not listen for remote -connections by default. If you wish to allow remote access, you need -to set the +remote-tls-port+ (encrypted) or +remote-clear-port+ -(unencrypted) CIB properties (i.e., those kept in the +cib+ tag, like -+num_updates+ and +epoch+). - -.Extra top-level CIB properties for remote access -[width="95%",cols="1m,1,3<",options="header",align="center"] -|========================================================= - -|Field -|Default -|Description - -|remote-tls-port -|_none_ -|Listen for encrypted remote connections on this port. - indexterm:[remote-tls-port,Remote Connection Option] - indexterm:[Remote Connection,Option,remote-tls-port] - -|remote-clear-port -|_none_ -|Listen for plaintext remote connections on this port. - indexterm:[remote-clear-port,Remote Connection Option] - indexterm:[Remote Connection,Option,remote-clear-port] - -|========================================================= - [[s-recurring-start]] == Specifying When Recurring Actions are Performed == By default, recurring actions are scheduled relative to when the resource started. So if your resource was last started at 14:32 and you have a backup set to be performed every 24 hours, then the backup will always run in the middle of the business day -- hardly desirable. To specify a date and time that the operation should be relative to, set the operation's +interval-origin+. 
The cluster uses this point to calculate the correct +start-delay+ such that the operation will occur at _origin + (interval * N)_. So, if the operation's interval is 24h, its interval-origin is set to 02:00 and it is currently 14:32, then the cluster would initiate the operation with a start delay of 11 hours and 28 minutes. If the resource is moved to another node before 2am, then the operation is cancelled.

The value specified for +interval+ and +interval-origin+ can be any date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601 standard]. By way of example, to specify an operation that would run on the first Monday of 2009 and every Monday after that, you would add:

.Specifying a Base for Recurring Action Intervals
=====
[source,XML]
<op id="my-weekly-action" name="custom-action" interval="P7D" interval-origin="2009-W01-1"/>
=====

[[s-failure-handling]]
== Handling Resource Failure ==

By default, Pacemaker will attempt to recover failed resources by restarting them. However, failure recovery is highly configurable.

=== Failure Counts ===

Pacemaker tracks resource failures for each combination of node, resource, and operation (start, stop, monitor, etc.). You can query the fail count for a particular node, resource, and/or operation using the `crm_failcount` command. For example, to see how many times the 10-second monitor for +myrsc+ has failed on +node1+, run:

----
# crm_failcount --query -r myrsc -N node1 -n monitor -I 10s
----

If you omit the node, `crm_failcount` will use the local node. If you omit the operation and interval, `crm_failcount` will display the sum of the fail counts for all operations on the resource.

You can use `crm_resource --cleanup` or `crm_failcount --delete` to clear fail counts. For example, to clear the above monitor failures, run:

----
# crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s
----

If you omit the resource, `crm_resource --cleanup` will clear failures for all resources. If you omit the node, it will clear failures on all nodes. If you omit the operation and interval, it will clear the failures for all operations on the resource.

[NOTE]
====
Even when cleaning up only a single operation, all failed operations will disappear from the status display. This allows us to trigger a re-check of the resource's current status.
====

Higher-level tools may provide other commands for querying and clearing fail counts.

The `crm_mon` tool shows the current cluster status, including any failed operations. To see the current fail counts for any failed resources, call `crm_mon` with the `--failcounts` option. This shows the fail counts per resource (that is, the sum of any operation fail counts for the resource).

=== Failure Response ===

Normally, if a running resource fails, pacemaker will try to stop it and start it again. Pacemaker will choose the best location to start it each time, which may be the same node that it failed on.

However, if a resource fails repeatedly, it is possible that there is an underlying problem on that node, and you might want to try a different node in such a case. Pacemaker allows you to set your preference via the +migration-threshold+ resource meta-attribute. footnote:[The naming of this option was perhaps unfortunate as it is easily confused with live migration, the process of moving a resource from one node to another without stopping it. Xen virtual guests are the most common example of resources that can be migrated in this manner.]

If you define +migration-threshold=pass:[N]+ for a resource, it will be banned from the original node after 'N' failures.
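For instance, here is a minimal sketch of how this might look in the CIB. The +migration-threshold+ and +failure-timeout+ meta-attribute names are as described in this section; the resource itself and the XML +id+ values are purely illustrative:

[source,XML]
-------
<primitive id="my-rsc" class="ocf" provider="pacemaker" type="Dummy">
  <meta_attributes id="my-rsc-meta_attributes">
    <!-- ban my-rsc from a node after three failures there -->
    <nvpair id="my-rsc-migration-threshold" name="migration-threshold" value="3"/>
    <!-- let the fail count expire after ten failure-free minutes (see below) -->
    <nvpair id="my-rsc-failure-timeout" name="failure-timeout" value="10min"/>
  </meta_attributes>
</primitive>
-------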
[NOTE]
====
The +migration-threshold+ is per 'resource', even though fail counts are tracked per 'operation'. The operation fail counts are added together to compare against the +migration-threshold+.
====

By default, fail counts remain until manually cleared by an administrator using `crm_resource --cleanup` or `crm_failcount --delete` (hopefully after first fixing the failure's cause). It is possible to have fail counts expire automatically by setting the +failure-timeout+ resource meta-attribute.

[IMPORTANT]
====
A successful operation does not clear past failures. If a recurring monitor operation fails once, succeeds many times, then fails again days later, its fail count is 2. Fail counts are cleared only by manual intervention or failure timeout.
====

For example, a setting of +migration-threshold=2+ and +failure-timeout=60s+ would cause the resource to move to a new node after 2 failures, and allow it to move back (depending on stickiness and constraint scores) after one minute.

[NOTE]
====
+failure-timeout+ is measured since the most recent failure. That is, older failures do not individually time out and lower the fail count. Instead, all failures are timed out simultaneously (and the fail count is reset to 0) if there is no new failure for the timeout period.
====

There are two exceptions to the migration threshold concept: when a resource either fails to start or fails to stop.

If the cluster property +start-failure-is-fatal+ is set to +true+ (which is the default), start failures cause the fail count to be set to +INFINITY+ and thus always cause the resource to move immediately.

Stop failures are slightly different and crucial. If a resource fails to stop and STONITH is enabled, then the cluster will fence the node in order to be able to start the resource elsewhere. If STONITH is not enabled, then the cluster has no way to continue and will not try to start the resource elsewhere, but will try to stop it again after the failure timeout.

[IMPORTANT]
Please read <> to understand how timeouts work before configuring a +failure-timeout+.

== Moving Resources ==
indexterm:[Moving,Resources]
indexterm:[Resource,Moving]

=== Moving Resources Manually ===

There are primarily two occasions when you would want to move a resource from its current location: when the whole node is under maintenance, and when a single resource needs to be moved.

==== Standby Mode ====

Since everything eventually comes down to a score, you could create constraints for every resource to prevent them from running on one node. While pacemaker configuration can seem convoluted at times, not even we would require this of administrators.

Instead, one can set a special node attribute which tells the cluster "don't let anything run here". There is even a helpful tool to query and set it, called `crm_standby`. To check the standby status of the current machine, run:

----
# crm_standby -G
----

A value of +on+ indicates that the node is _not_ able to host any resources, while a value of +off+ says that it _can_.

You can also check the status of other nodes in the cluster by specifying the `--node` option:

----
# crm_standby -G --node sles-2
----

To change the current node's standby status, use `-v` instead of `-G`:

----
# crm_standby -v on
----

Again, you can change another host's value by supplying a hostname with `--node`.

==== Moving One Resource ====

When only one resource is required to move, we could do this by creating location constraints.
However, once again we provide a user-friendly shortcut as part of the `crm_resource` command, which creates and modifies the extra constraints for you. If +Email+ were running on +sles-1+ and you wanted it moved to a specific location, the command would look something like:

----
# crm_resource -M -r Email -H sles-2
----

Behind the scenes, the tool will create the following location constraint:

[source,XML]
<rsc_location rsc="Email" node="sles-2" score="INFINITY"/>

It is important to note that subsequent invocations of `crm_resource -M` are not cumulative. So, if you ran these commands

----
# crm_resource -M -r Email -H sles-2
# crm_resource -M -r Email -H sles-3
----

then it is as if you had never performed the first command.

To allow the resource to move back again, use:

----
# crm_resource -U -r Email
----

Note the use of the word _allow_. The resource can move back to its original location but, depending on +resource-stickiness+, it might stay where it is. To be absolutely certain that it moves back to +sles-1+, move it there before issuing the call to `crm_resource -U`:

----
# crm_resource -M -r Email -H sles-1
# crm_resource -U -r Email
----

Alternatively, if you only care that the resource should be moved from its current location, try:

----
# crm_resource -B -r Email
----

This command will instead create a negative constraint, like

[source,XML]
<rsc_location rsc="Email" node="sles-1" score="-INFINITY"/>

This will achieve the desired effect, but will also have long-term consequences. As the tool will warn you, the creation of a +-INFINITY+ constraint will prevent the resource from running on that node until `crm_resource -U` is used. This includes the situation where every other cluster node is no longer available!

In some cases, such as when +resource-stickiness+ is set to +INFINITY+, it is possible that you will end up with the problem described in <>. The tool can detect some of these cases and deals with them by creating both positive and negative constraints. E.g.

+Email+ prefers +sles-1+ with a score of +-INFINITY+

+Email+ prefers +sles-2+ with a score of +INFINITY+

which has the same long-term consequences as discussed earlier.

=== Moving Resources Due to Connectivity Changes ===

You can configure the cluster to move resources when external connectivity is lost in two steps.

==== Tell Pacemaker to Monitor Connectivity ====

First, add an *ocf:pacemaker:ping* resource to the cluster. The *ping* resource uses the system utility of the same name to test whether the machines in a list (specified by DNS hostname or IPv4/IPv6 address) are reachable, and uses the results to maintain a node attribute called +pingd+ by default. footnote:[The attribute name is customizable, in order to allow multiple ping groups to be defined.]

[NOTE]
===========
Older versions of Pacemaker used a different agent *ocf:pacemaker:pingd* which is now deprecated in favor of *ping*. If your version of Pacemaker does not contain the *ping* resource agent, download the latest version from https://github.com/ClusterLabs/pacemaker/tree/master/extra/resources/ping
===========

Normally, the ping resource should run on all cluster nodes, which means that you'll need to create a clone. A template for this can be found below along with a description of the most interesting parameters.

.Common Options for a 'ping' Resource
[width="95%",cols="1m,4<",options="header",align="center"]
|=========================================================

|Field
|Description

|dampen
|The time to wait (dampening) for further changes to occur.
Use this to prevent a resource from bouncing around the cluster when cluster nodes notice the loss of connectivity at slightly different times.
 indexterm:[dampen,Ping Resource Option]
 indexterm:[Ping Resource,Option,dampen]

|multiplier
|The number of connected ping nodes gets multiplied by this value to get a score. Useful when there are multiple ping nodes configured.
 indexterm:[multiplier,Ping Resource Option]
 indexterm:[Ping Resource,Option,multiplier]

|host_list
|The machines to contact in order to determine the current connectivity status. Allowed values include resolvable DNS host names, IPv4 and IPv6 addresses.
 indexterm:[host_list,Ping Resource Option]
 indexterm:[Ping Resource,Option,host_list]

|=========================================================

.An example ping cluster resource that checks node connectivity once every minute
=====
[source,XML]
------------
<clone id="Connected">
   <primitive id="ping" provider="pacemaker" class="ocf" type="ping">
    <instance_attributes id="ping-attrs">
      <nvpair id="pingd-dampen" name="dampen" value="5s"/>
      <nvpair id="pingd-multiplier" name="multiplier" value="1000"/>
      <nvpair id="pingd-hosts" name="host_list" value="my.gateway.com www.bigcorp.com"/>
    </instance_attributes>
    <operations>
      <op id="ping-monitor-60s" interval="60s" name="monitor"/>
    </operations>
   </primitive>
</clone>
------------
=====

[IMPORTANT]
===========
You're only half done. The next section deals with telling Pacemaker how to deal with the connectivity status that +ocf:pacemaker:ping+ is recording.
===========

==== Tell Pacemaker How to Interpret the Connectivity Data ====

[IMPORTANT]
======
Before attempting the following, make sure you understand <>.
======

There are a number of ways to use the connectivity data.

The most common setup is for people to have a single ping target (e.g. the service network's default gateway), to prevent the cluster from running a resource on any unconnected node.

.Don't run a resource on unconnected nodes
=====
[source,XML]
-------
<rsc_location id="WebServer-no-connectivity" rsc="Webserver">
   <rule id="ping-exclude-rule" score="-INFINITY" >
    <expression id="ping-exclude" attribute="pingd" operation="not_defined"/>
   </rule>
</rsc_location>
-------
=====

A more complex setup is to have a number of ping targets configured. You can require the cluster to only run resources on nodes that can connect to all (or a minimum subset) of them.

.Run only on nodes connected to three or more ping targets.
=====
[source,XML]
-------
<primitive id="ping" provider="pacemaker" class="ocf" type="ping">
... <!-- omitting some configuration to highlight important parts -->
      <nvpair id="pingd-multiplier" name="multiplier" value="1000"/>
...
</primitive>
...
<rsc_location id="WebServer-connectivity" rsc="Webserver">
   <rule id="ping-prefer-rule" score="-INFINITY" >
    <expression id="ping-prefer" attribute="pingd" operation="lt" value="3000"/>
   </rule>
</rsc_location>
-------
=====

Alternatively, you can tell the cluster only to _prefer_ nodes with the best connectivity. Just be sure to set +multiplier+ to a value higher than that of +resource-stickiness+ (and don't set either of them to +INFINITY+).

.Prefer the node with the most connected ping nodes
=====
[source,XML]
-------
<rsc_location id="WebServer-connectivity" rsc="Webserver">
   <rule id="ping-prefer-rule" score-attribute="pingd" >
    <expression id="ping-prefer" attribute="pingd" operation="defined"/>
   </rule>
</rsc_location>
-------
=====

It is perhaps easier to think of this in terms of the simple constraints that the cluster translates it into. For example, if *sles-1* is connected to all five ping nodes but *sles-2* is only connected to two, then it would be as if you instead had the following constraints in your configuration:

.How the cluster translates the above location constraint
=====
[source,XML]
-------
<rsc_location id="ping-1" rsc="Webserver" node="sles-1" score="5000"/>
<rsc_location id="ping-2" rsc="Webserver" node="sles-2" score="2000"/>
-------
=====

The advantage is that you don't have to manually update any constraints whenever your network connectivity changes.

You can also combine the concepts above into something even more complex. The example below shows how you can prefer the node with the most connected ping nodes provided they have connectivity to at least three (again assuming that +multiplier+ is set to 1000).

.A more complex example of choosing a location based on connectivity
=====
[source,XML]
-------
<rsc_location id="WebServer-connectivity" rsc="Webserver">
   <rule id="ping-exclude-rule" score="-INFINITY" >
    <expression id="ping-exclude" attribute="pingd" operation="lt" value="3000"/>
   </rule>
   <rule id="ping-prefer-rule" score-attribute="pingd" >
    <expression id="ping-prefer" attribute="pingd" operation="defined"/>
   </rule>
</rsc_location>
-------
=====

[[s-migrating-resources]]
=== Migrating Resources ===

Normally, when the cluster needs to move a resource, it fully restarts the resource (i.e. stops the resource on the current node and starts it on the new node).

However, some types of resources, such as Xen virtual guests, are able to move to another location without loss of state (often referred to as live migration or hot migration). In pacemaker, this is called resource migration.
Pacemaker can be configured to migrate a resource when moving it, rather than restarting it. Not all resources are able to migrate (see the Migration Checklist below), and those that can won't do so in all situations. Conceptually, there are two requirements from which the other prerequisites follow:

* The resource must be active and healthy at the old location; and
* everything required for the resource to run must be available on both the old and new locations.

The cluster is able to accommodate both 'push' and 'pull' migration models by requiring the resource agent to support two special actions: +migrate_to+ (performed on the current location) and +migrate_from+ (performed on the destination).

In push migration, the process on the current location transfers the resource to the new location where it is later activated. In this scenario, most of the work would be done in the +migrate_to+ action and, if anything, the activation would occur during +migrate_from+.

Conversely for pull, the +migrate_to+ action is practically empty and +migrate_from+ does most of the work, extracting the relevant resource state from the old location and activating it.

There is no wrong or right way for a resource agent to implement migration, as long as it works.

.Migration Checklist
* The resource may not be a clone.
* The resource must use an OCF-style agent.
* The resource must not be in a failed or degraded state.
* The resource agent must support +migrate_to+ and +migrate_from+ actions, and advertise them in its metadata.
* The resource must have the +allow-migrate+ meta-attribute set to +true+ (which is not the default).

If an otherwise migratable resource depends on another resource via an ordering constraint, there are special situations in which it will be restarted rather than migrated. For example, if the resource depends on a clone, and at the time the resource needs to be moved, the clone has instances that are stopping and instances that are starting, then the resource will be restarted. The Policy Engine is not yet able to model this situation correctly and so takes the safer (if less optimal) path. Also, if a migratable resource depends on a non-migratable resource, and both need to be moved, the migratable resource will be restarted.

[[s-node-health]]
== Tracking Node Health ==

A node may be functioning adequately as far as cluster membership is concerned, and yet be "unhealthy" in some respect that makes it an undesirable location for resources. For example, a disk drive may be reporting SMART errors, or the CPU may be highly loaded.

Pacemaker offers a way to automatically move resources off unhealthy nodes.

=== Node Health Attributes ===

Pacemaker will treat any node attribute whose name starts with +#health+ as an indicator of node health.
Node health attributes may have one of the following values: .Allowed Values for Node Health Attributes [width="95%",cols="1,3<",options="header",align="center"] |========================================================= |Value |Intended significance |+red+ |This indicator is unhealthy indexterm:[Node health,red] |+yellow+ |This indicator is becoming unhealthy indexterm:[Node health,yellow] |+green+ |This indicator is healthy indexterm:[Node health,green] |'integer' |A numeric score to apply to all resources on this node (0 or positive is healthy, negative is unhealthy) indexterm:[Node health,score] |========================================================= === Node Health Strategy === Pacemaker assigns a node health score to each node, as the sum of the values of all its node health attributes. This score will be used as a location constraint applied to this node for all resources. The +node-health-strategy+ cluster option controls how Pacemaker responds to changes in node health attributes, and how it translates +red+, +yellow+, and +green+ to scores. Allowed values are: .Node Health Strategies [width="95%",cols="1m,3<",options="header",align="center"] |========================================================= |Value |Effect |none |Do not track node health attributes at all. indexterm:[Node health,none] |migrate-on-red |Assign the value of +-INFINITY+ to +red+, and 0 to +yellow+ and +green+. This will cause all resources to move off the node if any attribute is +red+. indexterm:[Node health,migrate-on-red] |only-green |Assign the value of +-INFINITY+ to +red+ and +yellow+, and 0 to +green+. This will cause all resources to move off the node if any attribute is +red+ or +yellow+. indexterm:[Node health,only-green] |progressive |Assign the value of the +node-health-red+ cluster option to +red+, the value of +node-health-yellow+ to +yellow+, and the value of +node-health-green+ to +green+. Each node is additionally assigned a score of +node-health-base+ (this allows resources to start even if some attributes are +yellow+). This strategy gives the administrator finer control over how important each value is. indexterm:[Node health,progressive] |custom |Track node health attributes using the same values as +progressive+ for +red+, +yellow+, and +green+, but do not take them into account. The administrator is expected to implement a policy by defining rules (see <>) referencing node health attributes. indexterm:[Node health,custom] |========================================================= === Measuring Node Health === Since Pacemaker calculates node health based on node attributes, any method that sets node attributes may be used to measure node health. The most common ways are resource agents or separate daemons. Pacemaker provides examples that can be used directly or as a basis for custom code. The +ocf:pacemaker:HealthCPU+ and +ocf:pacemaker:HealthSMART+ resource agents set node health attributes based on CPU and disk parameters. The +ipmiservicelogd+ daemon sets node health attributes based on IPMI values (the +ocf:pacemaker:SystemHealth+ resource agent can be used to manage the daemon as a cluster resource). == Reloading Services After a Definition Change == The cluster automatically detects changes to the definition of services it manages. The normal response is to stop the service (using the old definition) and start it again (with the new definition). This works well, but some services are smarter and can be told to use a new set of options without restarting. 
To take advantage of this capability, the resource agent must:

. Accept the +reload+ operation and perform any required actions. _The actions here depend completely on your application!_
+
.The DRBD agent's logic for supporting +reload+
=====
[source,Bash]
-------
case $1 in
    start)
        drbd_start
        ;;
    stop)
        drbd_stop
        ;;
    reload)
        drbd_reload
        ;;
    monitor)
        drbd_monitor
        ;;
    *)
        drbd_usage
        exit $OCF_ERR_UNIMPLEMENTED
        ;;
esac
exit $?
-------
=====

. Advertise the +reload+ operation in the +actions+ section of its metadata
+
.The DRBD Agent Advertising Support for the +reload+ Operation
=====
[source,XML]
-------
<?xml version="1.0"?>
  <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
  <resource-agent name="drbd">
    <version>1.1</version>

    <longdesc lang="en">
      Master/Slave OCF Resource Agent for DRBD
    </longdesc>

    ...

    <actions>
      <action name="start"  timeout="240" />
      <action name="reload" timeout="240" />
      ...
    </actions>
  </resource-agent>
-------
=====

. Advertise one or more parameters that can take effect using +reload+.
+
Any parameter with the +unique+ attribute set to 0 is eligible to be used in this way.
+
.Parameter that can be changed using reload
=====
[source,XML]
-------
<parameter name="drbdconf" unique="0">
    <longdesc lang="en">Full path to the drbd.conf file.</longdesc>
    <shortdesc lang="en">Path to drbd.conf</shortdesc>
    <content type="string" default="${OCF_RESKEY_drbdconf_default}"/>
</parameter>
-------
=====

Once these requirements are satisfied, the cluster will automatically know to reload the resource (instead of restarting) when a non-unique field changes.

[NOTE]
======
Metadata will not be re-read unless the resource needs to be started. This may mean that the resource will be restarted the first time, even though you changed a parameter with +unique=0+.
======

[NOTE]
======
If both a unique and non-unique field are changed simultaneously, the resource will still be restarted.
======

diff --git a/doc/Pacemaker_Explained/en-US/Ch-Intro.txt b/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
index dad06350b7..4975281b54 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
@@ -1,25 +1,23 @@
= Read-Me-First =

== The Scope of this Document ==

-The purpose of this document is to definitively explain the concepts
-used to configure Pacemaker. To achieve this, it will focus
-exclusively on the XML syntax used to configure the CIB.
-
-For those that are allergic to XML, there exist several unified shells
-and GUIs for Pacemaker. However these tools will not be covered at all
-in this document
-footnote:[I hope, however, that the concepts explained here make the functionality of these tools more easily understood.]
-, precisely because they hide the XML.
-
-Additionally, this document is NOT a step-by-step how-to guide for
-configuring a specific clustering scenario.
+This document is intended to be an exhaustive reference for
+configuring Pacemaker.
-Although such guides exist,
+To achieve this, it focuses on the XML syntax used to configure the CIB.
+For those that are allergic to XML, multiple higher-level front-ends
+(both command-line and GUI) are available. These tools will not be covered at
+all in this document
footnote:[
-For example, see https://www.clusterlabs.org/pacemaker/doc/[Clusters from Scratch]
-]
-the purpose of this document is to provide an understanding of the building
-blocks that can be used to construct any type of Pacemaker cluster.
+I hope, however, that the concepts explained here make the functionality of
+these tools more easily understood.
+].
+
+Users may be interested in other parts of the
+https://www.clusterlabs.org/pacemaker/doc/[Pacemaker documentation set],
+such as 'Clusters from Scratch', a step-by-step guide to setting up an
+example cluster, and 'Pacemaker Administration', a guide to maintaining a
+cluster.
include::../../shared/en-US/pacemaker-intro.txt[]

diff --git a/doc/Pacemaker_Explained/en-US/Ch-Nodes.txt b/doc/Pacemaker_Explained/en-US/Ch-Nodes.txt
index b55ff83911..75bb4fa8db 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Nodes.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Nodes.txt
@@ -1,134 +1,86 @@
= Cluster Nodes =

== Defining a Cluster Node ==

Each node in the cluster will have an entry in the nodes section containing its UUID, uname, and type.

.Example Corosync cluster node entry
======
[source,XML]
<node id="101" uname="pcmk-1"/>
======

In normal circumstances, the admin should let the cluster populate this information automatically from the communications and membership data.

[[s-node-name]]
== Where Pacemaker Gets the Node Name ==

Traditionally, Pacemaker required nodes to be referred to by the value returned by `uname -n`. This can be problematic for services that require the `uname -n` to be a specific value (e.g. for a licence file).

This requirement has been relaxed for clusters using Corosync 2.0 or later. The name Pacemaker uses is:

. The value stored in +corosync.conf+ under *ring0_addr* in the *nodelist*, if it does not contain an IP address; otherwise
. The value stored in +corosync.conf+ under *name* in the *nodelist*; otherwise
. The value of `uname -n`

Pacemaker provides the `crm_node -n` command which displays the name used by a running cluster.

If a Corosync *nodelist* is used, `crm_node --name-for-id` pass:[number] is also available to display the name used by the node with the corosync *nodeid* of pass:[number], for example: `crm_node --name-for-id 2`.

[[s-node-attributes]]
== Node Attributes ==

indexterm:[Node,attribute]

'Node attributes' are a special type of option (name-value pair) that applies to a node object.

Beyond the basic definition of a node, the administrator can describe the node's attributes, such as how much RAM, disk, what OS or kernel version it has, perhaps even its physical location. This information can then be used by the cluster when deciding where to place resources. For more information on the use of node attributes, see <>.

Node attributes can be specified ahead of time or populated later, when the cluster is running, using `crm_attribute`.

Below is what the node's definition would look like if the admin ran the command:

.Result of using crm_attribute to specify which kernel pcmk-1 is running
======
-------
# crm_attribute --type nodes --node pcmk-1 --name kernel --update $(uname -r)
-------
[source,XML]
-------
<node id="101" uname="pcmk-1">
   <instance_attributes id="nodes-101">
     <nvpair id="nodes-101-kernel" name="kernel" value="3.10.0-123.13.2.el7.x86_64"/>
   </instance_attributes>
</node>
-------
======

Rather than having to read the XML, a simpler way to determine the current value of an attribute is to use `crm_attribute` again:

----
# crm_attribute --type nodes --node pcmk-1 --name kernel --query
scope=nodes name=kernel value=3.10.0-123.13.2.el7.x86_64
----

By specifying `--type nodes` the admin tells the cluster that this attribute is persistent. There are also transient attributes which are kept in the status section which are "forgotten" whenever the node rejoins the cluster. The cluster uses this area to store a record of how many times a resource has failed on that node, but administrators can also read and write to this section by specifying `--type status`.

-== Managing Nodes in a Corosync-Based Cluster ==
-
-=== Adding a New Corosync Node ===
-
-indexterm:[Corosync,Add Cluster Node]
-indexterm:[Add Cluster Node,Corosync]
-
-To add a new node:
-
-. Install Corosync and Pacemaker on the new host.
-. Copy +/etc/corosync/corosync.conf+ and +/etc/corosync/authkey+ (if it exists)
- from an existing node.
You may need to modify the *mcastaddr* option to match
- the new node's IP address.
-. Start the cluster software on the new host. If a log message containing
- "Invalid digest" appears from Corosync, the keys are not consistent between
- the machines.
-
-=== Removing a Corosync Node ===
-
-indexterm:[Corosync,Remove Cluster Node]
-indexterm:[Remove Cluster Node,Corosync]
-
-Because the messaging and membership layers are the authoritative
-source for cluster nodes, deleting them from the CIB is not a complete
-solution. First, one must arrange for corosync to forget about the
-node (*pcmk-1* in the example below).
-
-. Stop the cluster on the host to be removed. How to do this will vary with
- your operating system and installed versions of cluster software, for example,
- `pcs cluster stop` if you are using pcs for cluster management.
-. From one of the remaining active cluster nodes, tell Pacemaker to forget
- about the removed host, which will also delete the node from the CIB:
-+
-----
-# crm_node -R pcmk-1
-----
-
-=== Replacing a Corosync Node ===
-
-indexterm:[Corosync,Replace Cluster Node]
-indexterm:[Replace Cluster Node,Corosync]
-
-To replace an existing cluster node:
-
-. Make sure the old node is completely stopped.
-. Give the new machine the same hostname and IP address as the old one.
-. Follow the procedure above for adding a node.

diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt
index e12591a486..6438f9e164 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt
@@ -1,448 +1,408 @@
= Cluster-Wide Configuration =

+== Configuration Layout ==
+
+The cluster is defined by the Cluster Information Base (CIB),
+which uses XML notation. The simplest CIB, an empty one, looks like this:
+
+.An empty configuration
+======
+[source,XML]
+-------
+<cib crm_feature_set="3.0.7" validate-with="pacemaker-1.2" epoch="1" num_updates="1" admin_epoch="0">
+  <configuration>
+    <crm_config/>
+    <nodes/>
+    <resources/>
+    <constraints/>
+  </configuration>
+  <status/>
+</cib>
+-------
+======
+
+The empty configuration above contains the major sections that make up a CIB:
+
+* +cib+: The entire CIB is enclosed with a +cib+ tag. Certain fundamental settings are defined as attributes of this tag.
+
+ ** +configuration+: This section -- the primary focus of this document -- contains traditional configuration information such as what resources the cluster serves and the relationships among them.
+
+ *** +crm_config+: cluster-wide configuration options
+ *** +nodes+: the machines that host the cluster
+ *** +resources+: the services run by the cluster
+ *** +constraints+: indications of how resources should be placed
+
+ ** +status+: This section contains the history of each resource on each node. Based on this data, the cluster can construct the complete current state of the cluster. The authoritative source for this section is the local resource manager (lrmd process) on each cluster node, and the cluster will occasionally repopulate the entire section. For this reason, it is never written to disk, and administrators are advised against modifying it in any way.
+
+In this document, configuration settings will be described as 'properties' or 'options' based on how they are defined in the CIB:
+
+* Properties are XML attributes of an XML element.
+* Options are name-value pairs expressed as +nvpair+ child elements of an XML element.
+
+Normally, you will use command-line tools that abstract the XML, so the distinction will be unimportant; both properties and options are cluster settings you can tweak.
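As a rough sketch of the distinction, +validate-with+ below is a 'property' (an XML attribute of the +cib+ element), while +cluster-delay+ is an 'option' (an +nvpair+ child element within +crm_config+). The +id+ values are illustrative, although +cib-bootstrap-options+ is the set name conventionally created by the tools:

[source,XML]
-------
<cib validate-with="pacemaker-1.2" epoch="1" num_updates="1" admin_epoch="0">
  <configuration>
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <!-- an option: a name-value pair expressed as an nvpair child element -->
        <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
      </cluster_property_set>
    </crm_config>
    <nodes/>
    <resources/>
    <constraints/>
  </configuration>
  <status/>
</cib>
-------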
+ == CIB Properties == Certain settings are defined by CIB properties (that is, attributes of the +cib+ tag) rather than with the rest of the cluster configuration in the +configuration+ section. The reason is simply a matter of parsing. These options are used by the configuration database which is, by design, mostly ignorant of the content it holds. So the decision was made to place them in an easy-to-find location. .CIB Properties [width="95%",cols="2m,5<",options="header",align="center"] |========================================================= |Field |Description | admin_epoch | indexterm:[Configuration Version,Cluster] indexterm:[Cluster,Option,Configuration Version] indexterm:[admin_epoch,Cluster Option] indexterm:[Cluster,Option,admin_epoch] When a node joins the cluster, the cluster performs a check to see which node has the best configuration. It asks the node with the highest (+admin_epoch+, +epoch+, +num_updates+) tuple to replace the configuration on all the nodes -- which makes setting them, and setting them correctly, very important. +admin_epoch+ is never modified by the cluster; you can use this to make the configurations on any inactive nodes obsolete. _Never set this value to zero_. In such cases, the cluster cannot tell the difference between your configuration and the "empty" one used when nothing is found on disk. | epoch | indexterm:[epoch,Cluster Option] indexterm:[Cluster,Option,epoch] The cluster increments this every time the configuration is updated (usually by the administrator). | num_updates | indexterm:[num_updates,Cluster Option] indexterm:[Cluster,Option,num_updates] The cluster increments this every time the configuration or status is updated (usually by the cluster) and resets it to 0 when epoch changes. | validate-with | indexterm:[validate-with,Cluster Option] indexterm:[Cluster,Option,validate-with] Determines the type of XML validation that will be done on the configuration. If set to +none+, the cluster will not verify that updates conform to the DTD (nor reject ones that don't). This option can be useful when operating a mixed-version cluster during an upgrade. |cib-last-written | indexterm:[cib-last-written,Cluster Property] indexterm:[Cluster,Property,cib-last-written] Indicates when the configuration was last written to disk. Maintained by the cluster; for informational purposes only. |have-quorum | indexterm:[have-quorum,Cluster Property] indexterm:[Cluster,Property,have-quorum] Indicates if the cluster has quorum. If false, this may mean that the cluster cannot start resources or fence other nodes (see +no-quorum-policy+ below). Maintained by the cluster. |dc-uuid | indexterm:[dc-uuid,Cluster Property] indexterm:[Cluster,Property,dc-uuid] Indicates which cluster node is the current leader. Used by the cluster when placing resources and determining the order of some events. Maintained by the cluster. |========================================================= -=== Working with CIB Properties === - -Although these fields can be written to by the user, in -most cases the cluster will overwrite any values specified by the -user with the "correct" ones. 
-
-To change the ones that can be specified by the user, for example +admin_epoch+, one should use:
-----
-# cibadmin --modify --xml-text '<cib admin_epoch="42"/>'
-----
-
-A complete set of CIB properties will look something like this:
-
-.Attributes set for a cib object
-======
-[source,XML]
--------
-<cib crm_feature_set="3.0.7" validate-with="pacemaker-1.2" admin_epoch="42"
-    epoch="116" num_updates="1" cib-last-written="..." have-quorum="1" dc-uuid="1"/>
--------
-======

[[s-cluster-options]]
== Cluster Options ==

Cluster options, as you might expect, control how the cluster behaves when confronted with certain situations.

They are grouped into sets within the +crm_config+ section, and, in advanced configurations, there may be more than one set. (This will be described later in the section on <> where we will show how to have the cluster use different sets of options during working hours than during weekends.) For now, we will describe the simple case where each option is present at most once.

You can obtain an up-to-date list of cluster options, including their default values, by running the `man pengine` and `man crmd` commands.

.Cluster Options
[width="95%",cols="5m,2,11<",options="header",align="center"]
|=========================================================
|Option
|Default
|Description

| enable-startup-probes
| TRUE
| indexterm:[enable-startup-probes,Cluster Option] indexterm:[Cluster,Option,enable-startup-probes]
Should the cluster check for active resources during startup?

| maintenance-mode
| FALSE
| indexterm:[maintenance-mode,Cluster Option] indexterm:[Cluster,Option,maintenance-mode]
Should the cluster refrain from monitoring, starting and stopping resources?

| stonith-enabled
| TRUE
| indexterm:[stonith-enabled,Cluster Option] indexterm:[Cluster,Option,stonith-enabled]
Should failed nodes and nodes with resources that can't be stopped be shot? If you value your data, set up a STONITH device and enable this. If true, or unset, the cluster will refuse to start resources unless one or more STONITH resources have been configured. If false, unresponsive nodes are immediately assumed to be running no resources, and resource takeover to online nodes starts without any further protection (which means _data loss_ if the unresponsive node still accesses shared storage, for example). See also the +requires+ meta-attribute in <>.

| stonith-action
| reboot
| indexterm:[stonith-action,Cluster Option] indexterm:[Cluster,Option,stonith-action]
Action to send to STONITH device. Allowed values are +reboot+ and +off+. The value +poweroff+ is also allowed, but is only used for legacy devices.

| stonith-timeout
| 60s
| indexterm:[stonith-timeout,Cluster Option] indexterm:[Cluster,Option,stonith-timeout]
How long to wait for STONITH actions (reboot, on, off) to complete

| stonith-max-attempts
| 10
| indexterm:[stonith-max-attempts,Cluster Option] indexterm:[Cluster,Option,stonith-max-attempts]
How many times fencing can fail for a target before the cluster will no longer immediately re-attempt it.

| stonith-watchdog-timeout
| 0
| indexterm:[stonith-watchdog-timeout,Cluster Option] indexterm:[Cluster,Option,stonith-watchdog-timeout]
If nonzero, rely on hardware watchdog self-fencing. If positive, assume unseen nodes self-fence within this much time. If negative, and the SBD_WATCHDOG_TIMEOUT environment variable is set, use twice that value.

| concurrent-fencing
| FALSE
| indexterm:[concurrent-fencing,Cluster Option] indexterm:[Cluster,Option,concurrent-fencing]
Is the cluster allowed to initiate multiple fence actions concurrently?

| cluster-delay
| 60s
| indexterm:[cluster-delay,Cluster Option] indexterm:[Cluster,Option,cluster-delay]
Estimated maximum round-trip delay over the network (excluding action execution).
If the TE requires an action to be executed on another node, it will consider the action failed if it does not get a response from the other node in this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. | dc-deadtime | 20s | indexterm:[dc-deadtime,Cluster Option] indexterm:[Cluster,Option,dc-deadtime] How long to wait for a response from other nodes during startup. The "correct" value will depend on the speed/load of your network and the type of switches used. | cluster-recheck-interval | 15min | indexterm:[cluster-recheck-interval,Cluster Option] indexterm:[Cluster,Option,cluster-recheck-interval] Polling interval for time-based changes to options, resource parameters and constraints. The Cluster is primarily event-driven, but your configuration can have elements that take effect based on the time of day. To ensure these changes take effect, we can optionally poll the cluster's status for changes. A value of 0 disables polling. Positive values are an interval (in seconds unless other SI units are specified, e.g. 5min). | cluster-ipc-limit | 500 | indexterm:[cluster-ipc-limit,Cluster Option] indexterm:[Cluster,Option,cluster-ipc-limit] The maximum IPC message backlog before one cluster daemon will disconnect another. This is of use in large clusters, for which a good value is the number of resources in the cluster multiplied by the number of nodes. The default of 500 is also the minimum. Raise this if you see "Evicting client" messages for cluster daemon PIDs in the logs. | pe-error-series-max | -1 | indexterm:[pe-error-series-max,Cluster Option] indexterm:[Cluster,Option,pe-error-series-max] The number of PE inputs resulting in ERRORs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-warn-series-max | -1 | indexterm:[pe-warn-series-max,Cluster Option] indexterm:[Cluster,Option,pe-warn-series-max] The number of PE inputs resulting in WARNINGs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-input-series-max | -1 | indexterm:[pe-input-series-max,Cluster Option] indexterm:[Cluster,Option,pe-input-series-max] The number of "normal" PE inputs to save. Used when reporting problems. A value of -1 means unlimited (report all). | placement-strategy | default | indexterm:[placement-strategy,Cluster Option] indexterm:[Cluster,Option,placement-strategy] How the cluster should allocate resources to nodes (see <>). Allowed values are +default+, +utilization+, +balanced+, and +minimal+. | node-health-strategy | none | indexterm:[node-health-strategy,Cluster Option] indexterm:[Cluster,Option,node-health-strategy] How the cluster should react to node health attributes (see <>). Allowed values are +none+, +migrate-on-red+, +only-green+, +progressive+, and +custom+. | node-health-base | 0 | indexterm:[node-health-base,Cluster Option] indexterm:[Cluster,Option,node-health-base] The base health score assigned to a node. Only used when +node-health-strategy+ is +progressive+. | node-health-green | 0 | indexterm:[node-health-green,Cluster Option] indexterm:[Cluster,Option,node-health-green] The score to use for a node health attribute whose value is +green+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | node-health-yellow | 0 | indexterm:[node-health-yellow,Cluster Option] indexterm:[Cluster,Option,node-health-yellow] The score to use for a node health attribute whose value is +yellow+. 
Only used when +node-health-strategy+ is +progressive+ or +custom+. | node-health-red | 0 | indexterm:[node-health-red,Cluster Option] indexterm:[Cluster,Option,node-health-red] The score to use for a node health attribute whose value is +red+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | remove-after-stop | FALSE | indexterm:[remove-after-stop,Cluster Option] indexterm:[Cluster,Option,remove-after-stop] _Advanced Use Only:_ Should the cluster remove resources from the LRM after they are stopped? Values other than the default are, at best, poorly tested and potentially dangerous. | startup-fencing | TRUE | indexterm:[startup-fencing,Cluster Option] indexterm:[Cluster,Option,startup-fencing] _Advanced Use Only:_ Should the cluster shoot unseen nodes? Not using the default is very unsafe! | election-timeout | 2min | indexterm:[election-timeout,Cluster Option] indexterm:[Cluster,Option,election-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | shutdown-escalation | 20min | indexterm:[shutdown-escalation,Cluster Option] indexterm:[Cluster,Option,shutdown-escalation] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-integration-timeout | 3min | indexterm:[crmd-integration-timeout,Cluster Option] indexterm:[Cluster,Option,crmd-integration-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-finalization-timeout | 30min | indexterm:[crmd-finalization-timeout,Cluster Option] indexterm:[Cluster,Option,crmd-finalization-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-transition-delay | 0s | indexterm:[crmd-transition-delay,Cluster Option] indexterm:[Cluster,Option,crmd-transition-delay] _Advanced Use Only:_ Delay cluster recovery for the configured interval to allow for additional/related events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. Enabling this option will slow down cluster recovery under all conditions. |========================================================= - -=== Querying and Setting Cluster Options === - -indexterm:[Querying,Cluster Option] -indexterm:[Setting,Cluster Option] -indexterm:[Cluster,Querying Options] -indexterm:[Cluster,Setting Options] - -Cluster options can be queried and modified using the `crm_attribute` tool. 
To get the current value of +cluster-delay+, you can run:
-
-----
-# crm_attribute --query --name cluster-delay
-----
-
-which is more simply written as
-
-----
-# crm_attribute -G -n cluster-delay
-----
-
-If a value is found, you'll see a result like this:
-
-----
-# crm_attribute -G -n cluster-delay
-scope=crm_config name=cluster-delay value=60s
-----
-
-If no value is found, the tool will display an error:
-
-----
-# crm_attribute -G -n clusta-deway
-scope=crm_config name=clusta-deway value=(null)
-Error performing operation: No such device or address
-----
-
-To use a different value (for example, 30 seconds), simply run:
-
-----
-# crm_attribute --name cluster-delay --update 30s
-----
-
-To go back to the cluster's default value, you can delete the value, for example:
-
-----
-# crm_attribute --name cluster-delay --delete
-Deleted crm_config option: id=cib-bootstrap-options-cluster-delay name=cluster-delay
-----
-
-=== When Options are Listed More Than Once ===
-
-If you ever see something like the following, it means that the option you're modifying is present more than once.
-
-.Deleting an option that is listed twice
-=======
-------
-# crm_attribute --name batch-limit --delete
-
-Multiple attributes match name=batch-limit in crm_config:
-Value: 50 (set=cib-bootstrap-options, id=cib-bootstrap-options-batch-limit)
-Value: 100 (set=custom, id=custom-batch-limit)
-Please choose from one of the matches above and supply the 'id' with --id
-------
-=======
-
-In such cases, follow the on-screen instructions to perform the requested action. To determine which value is currently being used by the cluster, refer to <>.

diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
index b4ce3b1c22..2ba64c0a0a 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt
@@ -1,854 +1,854 @@
= Cluster Resources =

[[s-resource-primitive]]
== What is a Cluster Resource? ==

indexterm:[Resource]

A resource is a service made highly available by a cluster. The simplest type of resource, a 'primitive' resource, is described in this chapter. More complex forms, such as groups and clones, are described in later chapters.

Every primitive resource has a 'resource agent'. A resource agent is an external program that abstracts the service it provides and presents a consistent view to the cluster. This allows the cluster to be agnostic about the resources it manages. The cluster doesn't need to understand how the resource works because it relies on the resource agent to do the right thing when given a `start`, `stop` or `monitor` command. For this reason, it is crucial that resource agents are well-tested.

Typically, resource agents come in the form of shell scripts. However, they can be written using any technology (such as C, Python or Perl) that the author is comfortable with.

[[s-resource-supported]]
== Resource Classes ==

indexterm:[Resource,class]

Pacemaker supports several classes of agents:

* OCF
* LSB
* Upstart
* Systemd
* Service
* Fencing
* Nagios Plugins

=== Open Cluster Framework ===

indexterm:[Resource,OCF]
indexterm:[OCF,Resources]
indexterm:[Open Cluster Framework,Resources]

The OCF standard footnote:[See http://www.opencf.org/cgi-bin/viewcvs.cgi/specs/ra/resource-agent-api.txt?rev=HEAD -- at least as it relates to resource agents. The Pacemaker implementation has been somewhat extended from the OCF specs, but none of those changes are incompatible with the original OCF specification.]
is basically an extension of the Linux Standard Base conventions for init scripts to: * support parameters, * make them self-describing, and * make them extensible OCF specs have strict definitions of the exit codes that actions must return. footnote:[ The resource-agents source code includes the `ocf-tester` script, which can be useful in this regard. ] The cluster follows these specifications exactly, and giving the wrong exit code will cause the cluster to behave in ways you will likely find puzzling and annoying. In particular, the cluster needs to distinguish a completely stopped resource from one which is in some erroneous and indeterminate state. Parameters are passed to the resource agent as environment variables, with the special prefix +OCF_RESKEY_+. So, a parameter which the user thinks of as +ip+ will be passed to the resource agent as +OCF_RESKEY_ip+. The number and purpose of the parameters is left to the resource agent; however, the resource agent should use the `meta-data` command to advertise any that it supports. The OCF class is the most preferred as it is an industry standard, highly flexible (allowing parameters to be passed to agents in a non-positional manner) and self-describing. For more information, see the http://www.linux-ha.org/wiki/OCF_Resource_Agents[reference] and -<>. +the 'Resource Agents' chapter of 'Pacemaker Administration'. === Linux Standard Base === indexterm:[Resource,LSB] indexterm:[LSB,Resources] indexterm:[Linux Standard Base,Resources] LSB resource agents are those found in +/etc/init.d+. Generally, they are provided by the OS distribution and, in order to be used with the cluster, they must conform to the LSB Spec. footnote:[ See http://refspecs.linux-foundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html for the LSB Spec as it relates to init scripts. ] [WARNING] ==== Many distributions claim LSB compliance but ship with broken init scripts. For details on how to check whether your init script is -LSB-compatible, see <>. Common problematic violations of -the LSB standard include: +LSB-compatible, see the 'Resource Agents' chapter of 'Pacemaker +Administration'. Common problematic violations of the LSB standard include: * Not implementing the status operation at all * Not observing the correct exit status codes for `start/stop/status` actions * Starting a started resource returns an error * Stopping a stopped resource returns an error ==== [IMPORTANT] ==== Remember to make sure the computer is _not_ configured to start any services at boot time -- that should be controlled by the cluster. ==== === Systemd === indexterm:[Resource,Systemd] indexterm:[Systemd,Resources] Some newer distributions have replaced the old http://en.wikipedia.org/wiki/Init#SysV-style["SysV"] style of initialization daemons and scripts with an alternative called http://www.freedesktop.org/wiki/Software/systemd[Systemd]. Pacemaker is able to manage these services _if they are present_. Instead of init scripts, systemd has 'unit files'. Generally, the services (unit files) are provided by the OS distribution, but there are online guides for converting from init scripts. footnote:[For example, http://0pointer.de/blog/projects/systemd-for-admins-3.html] [IMPORTANT] ==== Remember to make sure the computer is _not_ configured to start any services at boot time -- that should be controlled by the cluster. 
====

=== Upstart ===

indexterm:[Resource,Upstart]
indexterm:[Upstart,Resources]

Some newer distributions have replaced the old http://en.wikipedia.org/wiki/Init#SysV-style["SysV"] style of initialization daemons (and scripts) with an alternative called http://upstart.ubuntu.com/[Upstart]. Pacemaker is able to manage these services _if they are present_.

Instead of init scripts, upstart has 'jobs'. Generally, the services (jobs) are provided by the OS distribution.

[IMPORTANT]
====
Remember to make sure the computer is _not_ configured to start any services at boot time -- that should be controlled by the cluster.
====

=== System Services ===

indexterm:[Resource,System Services]
indexterm:[System Service,Resources]

Since there are various types of system services (+systemd+, +upstart+, and +lsb+), Pacemaker supports a special +service+ alias which intelligently figures out which one applies to a given cluster node. This is particularly useful when the cluster contains a mix of +systemd+, +upstart+, and +lsb+. In order, Pacemaker will try to find the named service as:

. an LSB init script
. a Systemd unit file
. an Upstart job

=== STONITH ===

indexterm:[Resource,STONITH]
indexterm:[STONITH,Resources]

The STONITH class is used exclusively for fencing-related resources. This is discussed later in <>.

=== Nagios Plugins ===

indexterm:[Resource,Nagios Plugins]
indexterm:[Nagios Plugins,Resources]

Nagios Plugins footnote:[The project has two independent forks, hosted at https://www.nagios-plugins.org/ and https://www.monitoring-plugins.org/. Output from both projects' plugins is similar, so plugins from either project can be used with pacemaker.] allow us to monitor services on remote hosts.

Pacemaker is able to do remote monitoring with the plugins _if they are present_. A common use case is to configure them as resources belonging to a resource container (usually a virtual machine), and the container will be restarted if any of them has failed. Another use is to configure them as ordinary resources to be used for monitoring hosts or services via the network.

The supported parameters are the same as the long options of the plugin.

[[primitive-resource]]
== Resource Properties ==

These values tell the cluster which resource agent to use for the resource, where to find that resource agent and what standards it conforms to.

.Properties of a Primitive Resource
[width="95%",cols="1m,6<",options="header",align="center"]
|=========================================================

|Field
|Description

|id
|Your name for the resource
 indexterm:[id,Resource]
 indexterm:[Resource,Property,id]

|class
|The standard the resource agent conforms to. Allowed values: +lsb+, +nagios+, +ocf+, +service+, +stonith+, +systemd+, +upstart+
 indexterm:[class,Resource]
 indexterm:[Resource,Property,class]

|type
|The name of the Resource Agent you wish to use. E.g. +IPaddr+ or +Filesystem+
 indexterm:[type,Resource]
 indexterm:[Resource,Property,type]

|provider
|The OCF spec allows multiple vendors to supply the same resource agent. To use the OCF resource agents supplied by the Heartbeat project, you would specify +heartbeat+ here.
 indexterm:[provider,Resource]
 indexterm:[Resource,Property,provider]

|=========================================================

The XML definition of a resource can be queried with the `crm_resource` tool.
For example: ---- # crm_resource --resource Email --query-xml ---- might produce: .A system resource definition ===== [source,XML] ===== [NOTE] ===== One of the main drawbacks to system services (LSB, systemd or Upstart) resources is that they do not allow any parameters! ===== //// See https://tools.ietf.org/html/rfc5737 for choice of example IP address //// .An OCF resource definition ===== [source,XML] ------- ------- ===== [[s-resource-options]] == Resource Options == Resources have two types of options: 'meta-attributes' and 'instance attributes'. Meta-attributes apply to any type of resource, while instance attributes are specific to each resource agent. === Resource Meta-Attributes === Meta-attributes are used by the cluster to decide how a resource should behave and can be easily set using the `--meta` option of the `crm_resource` command. .Meta-attributes of a Primitive Resource [width="95%",cols="2m,2,5<",options="header",align="center"] |========================================================= |Field |Default |Description |priority |0 |If not all resources can be active, the cluster will stop lower priority resources in order to keep higher priority ones active. indexterm:[priority,Resource Option] indexterm:[Resource,Option,priority] |target-role |Started |What state should the cluster attempt to keep this resource in? Allowed values: * +Stopped:+ Force the resource to be stopped * +Started:+ Allow the resource to be started (and in the case of <> resources, promoted to master if appropriate) * +Slave:+ Allow the resource to be started, but only in Slave mode if the resource is <> * +Master:+ Equivalent to +Started+ indexterm:[target-role,Resource Option] indexterm:[Resource,Option,target-role] |is-managed |TRUE |Is the cluster allowed to start and stop the resource? Allowed values: +true+, +false+ indexterm:[is-managed,Resource Option] indexterm:[Resource,Option,is-managed] |resource-stickiness |value of +resource-stickiness+ in the +rsc_defaults+ section |How much does the resource prefer to stay where it is? indexterm:[resource-stickiness,Resource Option] indexterm:[Resource,Option,resource-stickiness] |requires |+quorum+ for resources with a +class+ of +stonith+, otherwise +unfencing+ if unfencing is active in the cluster, otherwise +fencing+ if +stonith-enabled+ is true, otherwise +quorum+ |Conditions under which the resource can be started. Allowed values: * +nothing:+ can always be started * +quorum:+ The cluster can only start this resource if a majority of the configured nodes are active * +fencing:+ The cluster can only start this resource if a majority of the configured nodes are active _and_ any failed or unknown nodes have been <> * +unfencing:+ The cluster can only start this resource if a majority of the configured nodes are active _and_ any failed or unknown nodes have been fenced _and_ only on nodes that have been <> indexterm:[requires,Resource Option] indexterm:[Resource,Option,requires] |migration-threshold |INFINITY |How many failures may occur for this resource on a node before this node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible); by contrast, the cluster treats INFINITY (the default) as a very large but finite number. This option has an effect only if the failed operation has on-fail=restart (the default), and additionally for failed start operations, if the cluster property start-failure-is-fatal is false. indexterm:[migration-threshold,Resource Option] indexterm:[Resource,Option,migration-threshold] |failure-timeout |0 |How many seconds to wait before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. As with any time-based actions, this is not guaranteed to be checked more frequently than the value of +cluster-recheck-interval+ (see <>).
indexterm:[failure-timeout,Resource Option] indexterm:[Resource,Option,failure-timeout] |multiple-active |stop_start |What should the cluster do if it ever finds the resource active on more than one node? Allowed values: * +block:+ mark the resource as unmanaged * +stop_only:+ stop all active instances and leave them that way * +stop_start:+ stop all active instances and start the resource in one location only indexterm:[multiple-active,Resource Option] indexterm:[Resource,Option,multiple-active] |allow-migrate |TRUE for ocf:pacemaker:remote resources, FALSE otherwise |Whether the cluster should try to "live migrate" this resource when it needs to be moved (see <>) |container-attribute-target | |Specific to bundle resources; see <> |remote-node | |The name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. +WARNING:+ This value cannot overlap with any resource or node IDs. |remote-port |3121 |If +remote-node+ is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. |remote-addr |value of +remote-node+ |If +remote-node+ is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. |remote-connect-timeout |60s |If +remote-node+ is specified, how long before a pending guest connection will time out. |========================================================= As an example of setting resource options, if you performed the following commands on an LSB Email resource: ------- # crm_resource --meta --resource Email --set-parameter priority --parameter-value 100 # crm_resource -m -r Email -p multiple-active -v block ------- the resulting resource definition might be: .An LSB resource with cluster options ===== [source,XML] ------- ------- ===== [[s-resource-defaults]] === Setting Global Defaults for Resource Meta-Attributes === To set a default value for a resource option, add it to the +rsc_defaults+ section with `crm_attribute`. For example, ---- # crm_attribute --type rsc_defaults --name is-managed --update false ---- would prevent the cluster from starting or stopping any of the resources in the configuration (unless of course the individual resources were specifically enabled by having their +is-managed+ set to +true+). === Resource Instance Attributes === The resource agents of some resource classes (lsb, systemd and upstart 'not' among them) can be given parameters which determine how they behave and which instance of a service they control. If your resource agent supports parameters, you can add them with the `crm_resource` command. For example, ---- # crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2 ---- would create an entry in the resource like this: .An example OCF resource with instance attributes ===== [source,XML] ------- ------- ===== For an OCF resource, the result would be an environment variable called +OCF_RESKEY_ip+ with a value of +192.0.2.2+. The list of instance attributes supported by an OCF resource agent can be found by calling the resource agent with the `meta-data` command. The output contains an XML description of all the supported attributes, their purpose and default values. 
.Displaying the metadata for the Dummy resource agent template ===== ---- # export OCF_ROOT=/usr/lib/ocf # $OCF_ROOT/resource.d/pacemaker/Dummy meta-data ---- [source,XML] ------- 1.0 This is a Dummy Resource Agent. It does absolutely nothing except keep track of whether its running or not. Its purpose in life is for testing and to serve as a template for RA writers. NB: Please pay attention to the timeouts specified in the actions section below. They should be meaningful for the kind of resource the agent manages. They should be the minimum advised timeouts, but they shouldn't/cannot cover _all_ possible resource instances. So, try to be neither overly generous nor too stingy, but moderate. The minimum timeouts should never be below 10 seconds. Example stateless resource agent Location to store the resource state in. State file Fake attribute that can be changed to cause a reload Fake attribute that can be changed to cause a reload Number of seconds to sleep during operations. This can be used to test how the cluster reacts to operation timeouts. Operation sleep duration in seconds. ------- ===== == Resource Operations == indexterm:[Resource,Action] 'Operations' are actions the cluster can perform on a resource by calling the resource agent. Resource agents must support certain common operations such as start, stop and monitor, and may implement any others. Some operations are generated by the cluster itself, for example, stopping and starting resources as needed. You can configure operations in the cluster configuration. As an example, by default the cluster will 'not' ensure your resources stay healthy once they are started. footnote:[Currently, anyway. Automatic monitoring operations may be added in a future version of Pacemaker.] To instruct the cluster to do this, you need to add a +monitor+ operation to the resource's definition. .An OCF resource with a recurring health check ===== [source,XML] ------- ------- ===== .Properties of an Operation [width="95%",cols="2m,3,6<",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the operation. indexterm:[id,Action Property] indexterm:[Action,Property,id] |name | |The action to perform. Common values: +monitor+, +start+, +stop+ indexterm:[name,Action Property] indexterm:[Action,Property,name] |interval |0 |How frequently (in seconds) to perform the operation. A value of 0 means never. A positive value defines a 'recurring action', which is typically used with <>. indexterm:[interval,Action Property] indexterm:[Action,Property,interval] |timeout | |How long to wait before declaring the action has failed indexterm:[timeout,Action Property] indexterm:[Action,Property,timeout] |on-fail |restart '(except for stop operations, which default to' fence 'when STONITH is enabled and' block 'otherwise)' |The action to take if this action ever fails. Allowed values: * +ignore:+ Pretend the resource did not fail. * +block:+ Don't perform any further operations on the resource. * +stop:+ Stop the resource and do not start it elsewhere. * +restart:+ Stop the resource and start it again (possibly on a different node). * +fence:+ STONITH the node on which the resource failed. * +standby:+ Move _all_ resources away from the node on which the resource failed. indexterm:[on-fail,Action Property] indexterm:[Action,Property,on-fail] |enabled |TRUE |If +false+, ignore this operation definition. This is typically used to pause a particular recurring monitor operation; for instance, it can complement the respective resource being unmanaged (+is-managed=false+), as this alone will <>. Disabling the operation does not suppress all actions of the given type. Allowed values: +true+, +false+. indexterm:[enabled,Action Property] indexterm:[Action,Property,enabled] |record-pending |FALSE |If +true+, the intention to perform the operation is recorded so that GUIs and CLI tools can indicate that an operation is in progress. This is best set as an _operation default_ (see next section).
Allowed values: +true+, +false+. indexterm:[record-pending,Action Property] indexterm:[Action,Property,record-pending] |role | |Run the operation only on node(s) that the cluster thinks should be in the specified role. This only makes sense for recurring monitor operations. Allowed (case-sensitive) values: +Stopped+, +Started+, and in the case of <> resources, +Slave+ and +Master+. indexterm:[role,Action Property] indexterm:[Action,Property,role] |========================================================= [[s-resource-monitoring]] === Monitoring Resources for Failure === When Pacemaker first starts a resource, it runs one-time monitor operations (referred to as 'probes') to ensure the resource is running where it's supposed to be, and not running where it's not supposed to be. (This behavior can be affected by the +resource-discovery+ location constraint property.) Other than those initial probes, Pacemaker will not (by default) check that the resource continues to stay healthy. As in the example above, you must configure monitor operations explicitly to perform these checks. By default, a monitor operation will ensure that the resource is running where it is supposed to. The +target-role+ property can be used for further checking. For example, if a resource has one monitor operation with +interval=10 role=Started+ and a second monitor operation with +interval=11 role=Stopped+, the cluster will run the first monitor on any nodes it thinks 'should' be running the resource, and the second monitor on any nodes that it thinks 'should not' be running the resource (for the truly paranoid, who want to know when an administrator manually starts a service by mistake). [[s-monitoring-unmanaged]] === Monitoring Resources When Administration is Disabled === Recurring monitor operations behave differently under various administrative settings: * When a resource is unmanaged (by setting +is-managed=false+): No monitors will be stopped. + If the unmanaged resource is stopped on a node where the cluster thinks it should be running, the cluster will detect and report that it is not, but it will not consider the monitor failed, and will not try to start the resource until it is managed again. + Starting the unmanaged resource on a different node is strongly discouraged and will at least cause the cluster to consider the resource failed, and may require the resource's +target-role+ to be set to +Stopped+ then +Started+ to be recovered. * When a node is put into standby: All resources will be moved away from the node, and all monitor operations will be stopped on the node, except those with +role=Stopped+. Monitor operations with +role=Stopped+ will be started on the node if appropriate. * When the cluster is put into maintenance mode: All resources will be marked as unmanaged. All monitor operations will be stopped, except those with +role=Stopped+. As with single unmanaged resources, starting a resource on a node other than where the cluster expects it to be will cause problems. [[s-operation-defaults]] === Setting Global Defaults for Operations === You can change the global default values for operation properties in a given cluster. These are defined in an +op_defaults+ section of the CIB's +configuration+ section, and can be set with `crm_attribute`. For example, ---- # crm_attribute --type op_defaults --name timeout --update 20s ---- would default each operation's +timeout+ to 20 seconds. If an operation's definition also includes a value for +timeout+, then that value would be used for that operation instead.
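The resulting entry in the CIB's +configuration+ section might look like the following minimal sketch (the +id+ values here are illustrative, not mandated):

.A sketch of an +op_defaults+ section setting a default +timeout+
=====
[source,XML]
-------
<op_defaults>
   <!-- operation defaults are ordinary name/value pairs in a meta_attributes set -->
   <meta_attributes id="op_defaults-options">
      <nvpair id="op_defaults-timeout" name="timeout" value="20s"/>
   </meta_attributes>
</op_defaults>
-------
=====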
=== When Implicit Operations Take a Long Time === The cluster will always perform a number of implicit operations: +start+, +stop+ and a non-recurring +monitor+ operation used at startup to check whether the resource is already active. If one of these is taking too long, you can create an entry for it and specify a longer timeout. .An OCF resource with custom timeouts for its implicit actions ===== [source,XML] ------- ------- ===== === Multiple Monitor Operations === Provided no two operations (for a single resource) have the same name and interval, you can have as many monitor operations as you like. In this way, you can do a superficial health check every minute and progressively more intense ones at higher intervals. To tell the resource agent what kind of check to perform, you need to provide each monitor with a different value for a common parameter. The OCF standard creates a special parameter called +OCF_CHECK_LEVEL+ for this purpose and dictates that it is "made available to the resource agent without the normal +OCF_RESKEY+ prefix". Whatever name you choose, you can specify it by adding an +instance_attributes+ block to the +op+ tag. It is up to each resource agent to look for the parameter and decide how to use it. .An OCF resource with two recurring health checks, performing different levels of checks specified via +OCF_CHECK_LEVEL+. ===== [source,XML] ------- ------- ===== === Disabling a Monitor Operation === The easiest way to stop a recurring monitor is to just delete it. However, there can be times when you only want to disable it temporarily. In such cases, simply add +enabled="false"+ to the operation's definition. .Example of an OCF resource with a disabled health check ===== [source,XML] ------- ------- ===== This can be achieved from the command line by executing: ---- # cibadmin --modify --xml-text '' ---- Once you've done whatever you needed to do, you can then re-enable it with: ---- # cibadmin --modify --xml-text '' ---- diff --git a/doc/Pacemaker_Explained/en-US/Ch-Status.txt b/doc/Pacemaker_Explained/en-US/Ch-Status.txt index b46f0167e4..3c82074bdd 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Status.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Status.txt @@ -1,371 +1,372 @@ = Status -- Here be dragons = Most users never need to understand the contents of the status section and can be happy with the output from `crm_mon`. However, for those with a curious inclination, this section attempts to provide an overview of its contents. == Node Status == indexterm:[Node,Status] indexterm:[Status of a Node] In addition to the cluster's configuration, the CIB holds an up-to-date representation of each cluster node in the +status+ section. .A bare-bones status entry for a healthy node *cl-virt-1* ====== [source,XML] ----- ----- ====== Users are highly recommended _not_ to modify any part of a node's state _directly_. The cluster will periodically regenerate the entire section from authoritative sources, so any changes should be done with the tools appropriate to those sources.
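For example, a transient node attribute should be set through attrd with `attrd_updater`, rather than by editing the +status+ section by hand (a sketch; the attribute name here is invented for illustration):

----
# attrd_updater --name my-custom-attribute --update 1
----

The table below lists which daemon is authoritative for each part of the +status+ section.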
.Authoritative Sources for State Information [width="95%",cols="1m,1<",options="header",align="center"] |========================================================= | CIB Object | Authoritative Source |node_state|crmd |transient_attributes|attrd |lrm|lrmd |========================================================= The fields used in the +node_state+ objects are named as they are largely for historical reasons and are rooted in Pacemaker's origins as the resource manager for the older Heartbeat project. They have remained unchanged to preserve compatibility with older versions. .Node Status Fields [width="95%",cols="1m,4<",options="header",align="center"] |========================================================= |Field |Description | id | indexterm:[id,Node Status] indexterm:[Node,Status,id] Unique identifier for the node. Corosync-based clusters use a numeric counter. | uname | indexterm:[uname,Node Status] indexterm:[Node,Status,uname] The node's name as known by the cluster | in_ccm | indexterm:[in_ccm,Node Status] indexterm:[Node,Status,in_ccm] Is the node a member at the cluster communication layer? Allowed values: +true+, +false+. | crmd | indexterm:[crmd,Node Status] indexterm:[Node,Status,crmd] Is the node a member at the pacemaker layer? Allowed values: +online+, +offline+. | crm-debug-origin | indexterm:[crm-debug-origin,Node Status] indexterm:[Node,Status,crm-debug-origin] The name of the source function that made the most recent change (for debugging purposes). | join | indexterm:[join,Node Status] indexterm:[Node,Status,join] Does the node participate in hosting resources? Allowed values: +down+, +pending+, +member+, +banned+. | expected | indexterm:[expected,Node Status] indexterm:[Node,Status,expected] Expected value for +join+. |========================================================= The cluster uses these fields to determine whether, at the node level, the node is healthy or is in a failed state and needs to be fenced. == Transient Node Attributes == Like regular <>, the name/value pairs listed in the +transient_attributes+ section help to describe the node. However they are forgotten by the cluster when the node goes offline. This can be useful, for instance, when you want a node to be in standby mode (not able to run resources) just until the next reboot. In addition to any values the administrator sets, the cluster will also store information about failed resources here. .A set of transient node attributes for node *cl-virt-1* ====== [source,XML] ----- ----- ====== In the above example, we can see that a monitor on the +pingd:0+ resource has failed once, at 09:22:22 UTC 6 April 2009. footnote:[ You can use the standard `date` command to print a human-readable version of any seconds-since-epoch value, for example `date -d @1239009742`. ] We also see that the node is connected to three *pingd* peers and that all known resources have been checked for on this machine (+probe_complete+). == Operation History == indexterm:[Operation History] A node's resource history is held in the +lrm_resources+ tag (a child of the +lrm+ tag). The information stored here includes enough information for the cluster to stop the resource safely if it is removed from the +configuration+ section. Specifically, the resource's +id+, +class+, +type+ and +provider+ are stored. .A record of the +apcstonith+ resource ====== [source,XML] ====== Additionally, we store the last job for every combination of +resource+, +action+ and +interval+. 
The concatenation of the values in this tuple is used to create the id of the +lrm_rsc_op+ object. .Contents of an +lrm_rsc_op+ job [width="95%",cols="2m,5<",options="header",align="center"] |========================================================= |Field |Description | id | indexterm:[id,Action Status] indexterm:[Action,Status,id] Identifier for the job constructed from the resource's +id+, +operation+ and +interval+. | call-id | indexterm:[call-id,Action Status] indexterm:[Action,Status,call-id] The job's ticket number. Used as a sort key to determine the order in which the jobs were executed. | operation | indexterm:[operation,Action Status] indexterm:[Action,Status,operation] The action the resource agent was invoked with. | interval | indexterm:[interval,Action Status] indexterm:[Action,Status,interval] The frequency, in milliseconds, at which the operation will be repeated. A one-off job is indicated by 0. | op-status | indexterm:[op-status,Action Status] indexterm:[Action,Status,op-status] The job's status. Generally this will be either 0 (done) or -1 (pending). Rarely used in favor of +rc-code+. | rc-code | indexterm:[rc-code,Action Status] indexterm:[Action,Status,rc-code] -The job's result. Refer to <> for -details on what the values here mean and how they are interpreted. +The job's result. Refer to the 'Resource Agents' chapter of 'Pacemaker +Administration' for details on what the values here mean and how they are +interpreted. | last-run | indexterm:[last-run,Action Status] indexterm:[Action,Status,last-run] Machine-local date/time, in seconds since epoch, at which the job was executed. For diagnostic purposes. | last-rc-change | indexterm:[last-rc-change,Action Status] indexterm:[Action,Status,last-rc-change] Machine-local date/time, in seconds since epoch, at which the job first returned the current value of +rc-code+. For diagnostic purposes. | exec-time | indexterm:[exec-time,Action Status] indexterm:[Action,Status,exec-time] Time, in milliseconds, that the job was running for. For diagnostic purposes. | queue-time | indexterm:[queue-time,Action Status] indexterm:[Action,Status,queue-time] Time, in seconds, that the job was queued for in the LRMd. For diagnostic purposes. | crm_feature_set | indexterm:[crm_feature_set,Action Status] indexterm:[Action,Status,crm_feature_set] The version which this job description conforms to. Used when processing +op-digest+. | transition-key | indexterm:[transition-key,Action Status] indexterm:[Action,Status,transition-key] A concatenation of the job's graph action number, the graph number, the expected result and the UUID of the crmd instance that scheduled it. This is used to construct +transition-magic+ (below). | transition-magic | indexterm:[transition-magic,Action Status] indexterm:[Action,Status,transition-magic] A concatenation of the job's +op-status+, +rc-code+ and +transition-key+. Guaranteed to be unique for the life of the cluster (which ensures it is part of CIB update notifications) and contains all the information needed for the crmd to correctly analyze and process the completed job. Most importantly, the decomposed elements tell the crmd if the job entry was expected and whether it failed. | op-digest | indexterm:[op-digest,Action Status] indexterm:[Action,Status,op-digest] An MD5 sum representing the parameters passed to the job. Used to detect changes to the configuration, to restart resources if necessary.
| crm-debug-origin | indexterm:[crm-debug-origin,Action Status] indexterm:[Action,Status,crm-debug-origin] The origin of the current values. For diagnostic purposes. |========================================================= === Simple Operation History Example === .A monitor operation (determines current state of the +apcstonith+ resource) ====== [source,XML] ----- ----- ====== In the above example, the job is a non-recurring monitor operation often referred to as a "probe" for the +apcstonith+ resource. The cluster schedules probes for every configured resource on a node when the node first starts, in order to determine the resource's current state before it takes any further action. From the +transition-key+, we can see that this was the 22nd action of the 2nd graph produced by this instance of the crmd (2668bbeb-06d5-40f9-936d-24cb7f87006a). The third field of the +transition-key+ contains a 7, which indicates that the job expects to find the resource inactive. By looking at the +rc-code+ property, we see that this was the case. As that is the only job recorded for this node, we can conclude that the cluster started the resource elsewhere. === Complex Operation History Example === .Resource history of a +pingd+ clone with multiple jobs ====== [source,XML] ----- ----- ====== When more than one job record exists, it is important to first sort them by +call-id+ before interpreting them. Once sorted, the above example can be summarized as: . A non-recurring monitor operation returning 7 (not running), with a +call-id+ of 3 . A stop operation returning 0 (success), with a +call-id+ of 32 . A start operation returning 0 (success), with a +call-id+ of 33 . A recurring monitor returning 0 (success), with a +call-id+ of 34 The cluster processes each job record to build up a picture of the resource's state. After the first and second entries, it is considered stopped, and after the third it is considered active. Based on the last operation, we can tell that the resource is currently active. Additionally, from the presence of a +stop+ operation with a lower +call-id+ than that of the +start+ operation, we can conclude that the resource has been restarted. Specifically, this occurred as part of actions 11 and 31 of transition 11 from the crmd instance with the key +2668bbeb...+. This information can be helpful for locating the relevant section of the logs when looking for the source of a failure.
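As an illustration, the four fields of a +transition-key+ can be pulled apart with standard shell tools before searching the DC's logs for that transition. A sketch, using a key reassembled from the values discussed in the simple example above:

----
# KEY="22:2:7:2668bbeb-06d5-40f9-936d-24cb7f87006a"
# IFS=: read action transition expected_rc crmd_uuid <<<"$KEY"
# echo "action=$action transition=$transition expected-rc=$expected_rc crmd-uuid=$crmd_uuid"
action=22 transition=2 expected-rc=7 crmd-uuid=2668bbeb-06d5-40f9-936d-24cb7f87006a
----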
diff --git a/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml b/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml index 0b4d602aff..9ad6e3561a 100644 --- a/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml +++ b/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml @@ -1,44 +1,39 @@ - - - - - Further Reading Project Website: Project Documentation: SUSE High Availibility Guide: Corosync Configuration: diff --git a/doc/Pacemaker_Explained/en-US/images b/doc/Pacemaker_Explained/en-US/images new file mode 120000 index 0000000000..963300dd95 --- /dev/null +++ b/doc/Pacemaker_Explained/en-US/images @@ -0,0 +1 @@ +../../shared/en-US/images \ No newline at end of file diff --git a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-big.dot b/doc/Pacemaker_Explained/en-US/images/Policy-Engine-big.dot deleted file mode 100644 index 40ced22ccc..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-big.dot +++ /dev/null @@ -1,83 +0,0 @@ -digraph "g" { -"Cancel drbd0:0_monitor_10000 frigg" -> "drbd0:0_demote_0 frigg" [ style = bold] -"Cancel drbd0:0_monitor_10000 frigg" [ style=bold color="green" fontcolor="black" ] -"Cancel drbd0:1_monitor_12000 odin" -> "drbd0:1_promote_0 odin" [ style = bold] -"Cancel drbd0:1_monitor_12000 odin" [ style=bold color="green" fontcolor="black" ] -"IPaddr0_monitor_5000 odin" [ style=bold color="green" fontcolor="black" ] -"IPaddr0_start_0 odin" -> "IPaddr0_monitor_5000 odin" [ style = bold] -"IPaddr0_start_0 odin" -> "MailTo_start_0 odin" [ style = bold] -"IPaddr0_start_0 odin" -> "group_running_0" [ style = bold] -"IPaddr0_start_0 odin" [ style=bold color="green" fontcolor="black" ] -"MailTo_start_0 odin" -> "group_running_0" [ style = bold] -"MailTo_start_0 odin" [ style=bold color="green" fontcolor="black" ] -"drbd0:0_demote_0 frigg" -> "drbd0:0_monitor_12000 frigg" [ style = bold] -"drbd0:0_demote_0 frigg" -> "ms_drbd_demoted_0" [ style = bold] -"drbd0:0_demote_0 frigg" [ style=bold color="green" fontcolor="black" ] -"drbd0:0_monitor_12000 frigg" [ style=bold color="green" fontcolor="black" ] -"drbd0:0_post_notify_demote_0 frigg" -> "ms_drbd_confirmed-post_notify_demoted_0" [ style = bold] -"drbd0:0_post_notify_demote_0 frigg" [ style=bold color="green" fontcolor="black" ] -"drbd0:0_post_notify_promote_0 frigg" -> "ms_drbd_confirmed-post_notify_promoted_0" [ style = bold] -"drbd0:0_post_notify_promote_0 frigg" [ style=bold color="green" fontcolor="black" ] -"drbd0:0_pre_notify_demote_0 frigg" -> "ms_drbd_confirmed-pre_notify_demote_0" [ style = bold] -"drbd0:0_pre_notify_demote_0 frigg" [ style=bold color="green" fontcolor="black" ] -"drbd0:0_pre_notify_promote_0 frigg" -> "ms_drbd_confirmed-pre_notify_promote_0" [ style = bold] -"drbd0:0_pre_notify_promote_0 frigg" [ style=bold color="green" fontcolor="black" ] -"drbd0:1_monitor_10000 odin" [ style=bold color="green" fontcolor="black" ] -"drbd0:1_post_notify_demote_0 odin" -> "ms_drbd_confirmed-post_notify_demoted_0" [ style = bold] -"drbd0:1_post_notify_demote_0 odin" [ style=bold color="green" fontcolor="black" ] -"drbd0:1_post_notify_promote_0 odin" -> "ms_drbd_confirmed-post_notify_promoted_0" [ style = bold] -"drbd0:1_post_notify_promote_0 odin" [ style=bold color="green" fontcolor="black" ] -"drbd0:1_pre_notify_demote_0 odin" -> "ms_drbd_confirmed-pre_notify_demote_0" [ style = bold] -"drbd0:1_pre_notify_demote_0 odin" [ style=bold color="green" fontcolor="black" ] -"drbd0:1_pre_notify_promote_0 odin" -> "ms_drbd_confirmed-pre_notify_promote_0" [ style = bold] 
-"drbd0:1_pre_notify_promote_0 odin" [ style=bold color="green" fontcolor="black" ] -"drbd0:1_promote_0 odin" -> "drbd0:1_monitor_10000 odin" [ style = bold] -"drbd0:1_promote_0 odin" -> "ms_drbd_promoted_0" [ style = bold] -"drbd0:1_promote_0 odin" [ style=bold color="green" fontcolor="black" ] -"group_running_0" [ style=bold color="green" fontcolor="orange" ] -"group_start_0" -> "IPaddr0_start_0 odin" [ style = bold] -"group_start_0" -> "MailTo_start_0 odin" [ style = bold] -"group_start_0" -> "group_running_0" [ style = bold] -"group_start_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_confirmed-post_notify_demoted_0" -> "drbd0:0_monitor_12000 frigg" [ style = bold] -"ms_drbd_confirmed-post_notify_demoted_0" -> "drbd0:1_monitor_10000 odin" [ style = bold] -"ms_drbd_confirmed-post_notify_demoted_0" -> "ms_drbd_pre_notify_promote_0" [ style = bold] -"ms_drbd_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_confirmed-post_notify_promoted_0" -> "drbd0:0_monitor_12000 frigg" [ style = bold] -"ms_drbd_confirmed-post_notify_promoted_0" -> "drbd0:1_monitor_10000 odin" [ style = bold] -"ms_drbd_confirmed-post_notify_promoted_0" -> "group_start_0" [ style = bold] -"ms_drbd_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_confirmed-pre_notify_demote_0" -> "ms_drbd_demote_0" [ style = bold] -"ms_drbd_confirmed-pre_notify_demote_0" -> "ms_drbd_post_notify_demoted_0" [ style = bold] -"ms_drbd_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_confirmed-pre_notify_promote_0" -> "ms_drbd_post_notify_promoted_0" [ style = bold] -"ms_drbd_confirmed-pre_notify_promote_0" -> "ms_drbd_promote_0" [ style = bold] -"ms_drbd_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_demote_0" -> "drbd0:0_demote_0 frigg" [ style = bold] -"ms_drbd_demote_0" -> "ms_drbd_demoted_0" [ style = bold] -"ms_drbd_demote_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_demoted_0" -> "ms_drbd_post_notify_demoted_0" [ style = bold] -"ms_drbd_demoted_0" -> "ms_drbd_promote_0" [ style = bold] -"ms_drbd_demoted_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_post_notify_demoted_0" -> "drbd0:0_post_notify_demote_0 frigg" [ style = bold] -"ms_drbd_post_notify_demoted_0" -> "drbd0:1_post_notify_demote_0 odin" [ style = bold] -"ms_drbd_post_notify_demoted_0" -> "ms_drbd_confirmed-post_notify_demoted_0" [ style = bold] -"ms_drbd_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_post_notify_promoted_0" -> "drbd0:0_post_notify_promote_0 frigg" [ style = bold] -"ms_drbd_post_notify_promoted_0" -> "drbd0:1_post_notify_promote_0 odin" [ style = bold] -"ms_drbd_post_notify_promoted_0" -> "ms_drbd_confirmed-post_notify_promoted_0" [ style = bold] -"ms_drbd_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_pre_notify_demote_0" -> "drbd0:0_pre_notify_demote_0 frigg" [ style = bold] -"ms_drbd_pre_notify_demote_0" -> "drbd0:1_pre_notify_demote_0 odin" [ style = bold] -"ms_drbd_pre_notify_demote_0" -> "ms_drbd_confirmed-pre_notify_demote_0" [ style = bold] -"ms_drbd_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_pre_notify_promote_0" -> "drbd0:0_pre_notify_promote_0 frigg" [ style = bold] -"ms_drbd_pre_notify_promote_0" -> "drbd0:1_pre_notify_promote_0 odin" [ style = bold] -"ms_drbd_pre_notify_promote_0" -> "ms_drbd_confirmed-pre_notify_promote_0" [ style = 
bold] -"ms_drbd_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_promote_0" -> "drbd0:1_promote_0 odin" [ style = bold] -"ms_drbd_promote_0" [ style=bold color="green" fontcolor="orange" ] -"ms_drbd_promoted_0" -> "group_start_0" [ style = bold] -"ms_drbd_promoted_0" -> "ms_drbd_post_notify_promoted_0" [ style = bold] -"ms_drbd_promoted_0" [ style=bold color="green" fontcolor="orange" ] -} diff --git a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-big.svg b/doc/Pacemaker_Explained/en-US/images/Policy-Engine-big.svg deleted file mode 100644 index 7964fcfd30..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-big.svg +++ /dev/null @@ -1,418 +0,0 @@ - - - - - - -g - - -Cancel drbd0:0_monitor_10000 frigg - -Cancel drbd0:0_monitor_10000 frigg - - -drbd0:0_demote_0 frigg - -drbd0:0_demote_0 frigg - - -Cancel drbd0:0_monitor_10000 frigg->drbd0:0_demote_0 frigg - - - - -drbd0:0_monitor_12000 frigg - -drbd0:0_monitor_12000 frigg - - -drbd0:0_demote_0 frigg->drbd0:0_monitor_12000 frigg - - - - -ms_drbd_demoted_0 - -ms_drbd_demoted_0 - - -drbd0:0_demote_0 frigg->ms_drbd_demoted_0 - - - - -Cancel drbd0:1_monitor_12000 odin - -Cancel drbd0:1_monitor_12000 odin - - -drbd0:1_promote_0 odin - -drbd0:1_promote_0 odin - - -Cancel drbd0:1_monitor_12000 odin->drbd0:1_promote_0 odin - - - - -drbd0:1_monitor_10000 odin - -drbd0:1_monitor_10000 odin - - -drbd0:1_promote_0 odin->drbd0:1_monitor_10000 odin - - - - -ms_drbd_promoted_0 - -ms_drbd_promoted_0 - - -drbd0:1_promote_0 odin->ms_drbd_promoted_0 - - - - -IPaddr0_monitor_5000 odin - -IPaddr0_monitor_5000 odin - - -IPaddr0_start_0 odin - -IPaddr0_start_0 odin - - -IPaddr0_start_0 odin->IPaddr0_monitor_5000 odin - - - - -MailTo_start_0 odin - -MailTo_start_0 odin - - -IPaddr0_start_0 odin->MailTo_start_0 odin - - - - -group_running_0 - -group_running_0 - - -IPaddr0_start_0 odin->group_running_0 - - - - -MailTo_start_0 odin->group_running_0 - - - - -ms_drbd_post_notify_demoted_0 - -ms_drbd_post_notify_demoted_0 - - -ms_drbd_demoted_0->ms_drbd_post_notify_demoted_0 - - - - -ms_drbd_promote_0 - -ms_drbd_promote_0 - - -ms_drbd_demoted_0->ms_drbd_promote_0 - - - - -drbd0:0_post_notify_demote_0 frigg - -drbd0:0_post_notify_demote_0 frigg - - -ms_drbd_confirmed-post_notify_demoted_0 - -ms_drbd_confirmed-post_notify_demoted_0 - - -drbd0:0_post_notify_demote_0 frigg->ms_drbd_confirmed-post_notify_demoted_0 - - - - -ms_drbd_confirmed-post_notify_demoted_0->drbd0:0_monitor_12000 frigg - - - - -ms_drbd_confirmed-post_notify_demoted_0->drbd0:1_monitor_10000 odin - - - - -ms_drbd_pre_notify_promote_0 - -ms_drbd_pre_notify_promote_0 - - -ms_drbd_confirmed-post_notify_demoted_0->ms_drbd_pre_notify_promote_0 - - - - -drbd0:0_post_notify_promote_0 frigg - -drbd0:0_post_notify_promote_0 frigg - - -ms_drbd_confirmed-post_notify_promoted_0 - -ms_drbd_confirmed-post_notify_promoted_0 - - -drbd0:0_post_notify_promote_0 frigg->ms_drbd_confirmed-post_notify_promoted_0 - - - - -ms_drbd_confirmed-post_notify_promoted_0->drbd0:0_monitor_12000 frigg - - - - -ms_drbd_confirmed-post_notify_promoted_0->drbd0:1_monitor_10000 odin - - - - -group_start_0 - -group_start_0 - - -ms_drbd_confirmed-post_notify_promoted_0->group_start_0 - - - - -drbd0:0_pre_notify_demote_0 frigg - -drbd0:0_pre_notify_demote_0 frigg - - -ms_drbd_confirmed-pre_notify_demote_0 - -ms_drbd_confirmed-pre_notify_demote_0 - - -drbd0:0_pre_notify_demote_0 frigg->ms_drbd_confirmed-pre_notify_demote_0 - - - - -ms_drbd_demote_0 - -ms_drbd_demote_0 - - 
-ms_drbd_confirmed-pre_notify_demote_0->ms_drbd_demote_0 - - - - -ms_drbd_confirmed-pre_notify_demote_0->ms_drbd_post_notify_demoted_0 - - - - -drbd0:0_pre_notify_promote_0 frigg - -drbd0:0_pre_notify_promote_0 frigg - - -ms_drbd_confirmed-pre_notify_promote_0 - -ms_drbd_confirmed-pre_notify_promote_0 - - -drbd0:0_pre_notify_promote_0 frigg->ms_drbd_confirmed-pre_notify_promote_0 - - - - -ms_drbd_post_notify_promoted_0 - -ms_drbd_post_notify_promoted_0 - - -ms_drbd_confirmed-pre_notify_promote_0->ms_drbd_post_notify_promoted_0 - - - - -ms_drbd_confirmed-pre_notify_promote_0->ms_drbd_promote_0 - - - - -drbd0:1_post_notify_demote_0 odin - -drbd0:1_post_notify_demote_0 odin - - -drbd0:1_post_notify_demote_0 odin->ms_drbd_confirmed-post_notify_demoted_0 - - - - -drbd0:1_post_notify_promote_0 odin - -drbd0:1_post_notify_promote_0 odin - - -drbd0:1_post_notify_promote_0 odin->ms_drbd_confirmed-post_notify_promoted_0 - - - - -drbd0:1_pre_notify_demote_0 odin - -drbd0:1_pre_notify_demote_0 odin - - -drbd0:1_pre_notify_demote_0 odin->ms_drbd_confirmed-pre_notify_demote_0 - - - - -drbd0:1_pre_notify_promote_0 odin - -drbd0:1_pre_notify_promote_0 odin - - -drbd0:1_pre_notify_promote_0 odin->ms_drbd_confirmed-pre_notify_promote_0 - - - - -ms_drbd_promoted_0->group_start_0 - - - - -ms_drbd_promoted_0->ms_drbd_post_notify_promoted_0 - - - - -group_start_0->IPaddr0_start_0 odin - - - - -group_start_0->MailTo_start_0 odin - - - - -group_start_0->group_running_0 - - - - -ms_drbd_pre_notify_promote_0->drbd0:0_pre_notify_promote_0 frigg - - - - -ms_drbd_pre_notify_promote_0->ms_drbd_confirmed-pre_notify_promote_0 - - - - -ms_drbd_pre_notify_promote_0->drbd0:1_pre_notify_promote_0 odin - - - - -ms_drbd_demote_0->drbd0:0_demote_0 frigg - - - - -ms_drbd_demote_0->ms_drbd_demoted_0 - - - - -ms_drbd_post_notify_demoted_0->drbd0:0_post_notify_demote_0 frigg - - - - -ms_drbd_post_notify_demoted_0->ms_drbd_confirmed-post_notify_demoted_0 - - - - -ms_drbd_post_notify_demoted_0->drbd0:1_post_notify_demote_0 odin - - - - -ms_drbd_post_notify_promoted_0->drbd0:0_post_notify_promote_0 frigg - - - - -ms_drbd_post_notify_promoted_0->ms_drbd_confirmed-post_notify_promoted_0 - - - - -ms_drbd_post_notify_promoted_0->drbd0:1_post_notify_promote_0 odin - - - - -ms_drbd_promote_0->drbd0:1_promote_0 odin - - - - -ms_drbd_pre_notify_demote_0 - -ms_drbd_pre_notify_demote_0 - - -ms_drbd_pre_notify_demote_0->drbd0:0_pre_notify_demote_0 frigg - - - - -ms_drbd_pre_notify_demote_0->ms_drbd_confirmed-pre_notify_demote_0 - - - - -ms_drbd_pre_notify_demote_0->drbd0:1_pre_notify_demote_0 odin - - - - - diff --git a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-small.dot b/doc/Pacemaker_Explained/en-US/images/Policy-Engine-small.dot deleted file mode 100644 index 3fef81e789..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-small.dot +++ /dev/null @@ -1,31 +0,0 @@ - digraph "g" { -"rsc1_monitor_0 pcmk-2" -> "probe_complete pcmk-2" [ style = bold] -"rsc1_monitor_0 pcmk-2" [ style=bold color="green" fontcolor="black" ] -"rsc1_stop_0 pcmk-1" [ style=dashed color="red" fontcolor="black" ] -"rsc1_start_0 pcmk-2" [ style=dashed color="red" fontcolor="black" ] -"rsc1_stop_0 pcmk-1" -> "rsc1_start_0 pcmk-2" [ style = dashed ] -"rsc1_stop_0 pcmk-1" -> "all_stopped" [ style = dashed ] -"probe_complete" -> "rsc1_start_0 pcmk-2" [ style = dashed ] - -"rsc2_monitor_0 pcmk-2" -> "probe_complete pcmk-2" [ style = bold] -"rsc2_monitor_0 pcmk-2" [ style=bold color="green" fontcolor="black" ] -"rsc2_stop_0 pcmk-1" [ style=dashed 
color="red" fontcolor="black" ] -"rsc2_start_0 pcmk-2" [ style=dashed color="red" fontcolor="black" ] -"rsc2_stop_0 pcmk-1" -> "rsc2_start_0 pcmk-2" [ style = dashed ] -"rsc2_stop_0 pcmk-1" -> "all_stopped" [ style = dashed ] -"probe_complete" -> "rsc2_start_0 pcmk-2" [ style = dashed ] - -"rsc3_monitor_0 pcmk-2" -> "probe_complete pcmk-2" [ style = bold] -"rsc3_monitor_0 pcmk-2" [ style=bold color="green" fontcolor="black" ] -"rsc3_stop_0 pcmk-1" [ style=dashed color="blue" fontcolor="orange" ] -"rsc3_start_0 pcmk-2" [ style=dashed color="blue" fontcolor="black" ] -"rsc3_stop_0 pcmk-1" -> "all_stopped" [ style = dashed ] -"probe_complete" -> "rsc3_start_0 pcmk-2" [ style = dashed ] - -"probe_complete pcmk-2" -> "probe_complete" [ style = bold] -"probe_complete pcmk-2" [ style=bold color="green" fontcolor="black" ] -"probe_complete" [ style=bold color="green" fontcolor="orange" ] - -"all_stopped" [ style=dashed color="red" fontcolor="orange" ] - -} diff --git a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-small.svg b/doc/Pacemaker_Explained/en-US/images/Policy-Engine-small.svg deleted file mode 100644 index a020d564f8..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/Policy-Engine-small.svg +++ /dev/null @@ -1,133 +0,0 @@ - - - - - - -g - - -rsc1_monitor_0 pcmk-2 - -rsc1_monitor_0 pcmk-2 - - -probe_complete pcmk-2 - -probe_complete pcmk-2 - - -rsc1_monitor_0 pcmk-2->probe_complete pcmk-2 - - - - -probe_complete - -probe_complete - - -probe_complete pcmk-2->probe_complete - - - - -rsc1_stop_0 pcmk-1 - -rsc1_stop_0 pcmk-1 - - -rsc1_start_0 pcmk-2 - -rsc1_start_0 pcmk-2 - - -rsc1_stop_0 pcmk-1->rsc1_start_0 pcmk-2 - - - - -all_stopped - -all_stopped - - -rsc1_stop_0 pcmk-1->all_stopped - - - - -probe_complete->rsc1_start_0 pcmk-2 - - - - -rsc2_start_0 pcmk-2 - -rsc2_start_0 pcmk-2 - - -probe_complete->rsc2_start_0 pcmk-2 - - - - -rsc3_start_0 pcmk-2 - -rsc3_start_0 pcmk-2 - - -probe_complete->rsc3_start_0 pcmk-2 - - - - -rsc2_monitor_0 pcmk-2 - -rsc2_monitor_0 pcmk-2 - - -rsc2_monitor_0 pcmk-2->probe_complete pcmk-2 - - - - -rsc2_stop_0 pcmk-1 - -rsc2_stop_0 pcmk-1 - - -rsc2_stop_0 pcmk-1->all_stopped - - - - -rsc2_stop_0 pcmk-1->rsc2_start_0 pcmk-2 - - - - -rsc3_monitor_0 pcmk-2 - -rsc3_monitor_0 pcmk-2 - - -rsc3_monitor_0 pcmk-2->probe_complete pcmk-2 - - - - -rsc3_stop_0 pcmk-1 - -rsc3_stop_0 pcmk-1 - - -rsc3_stop_0 pcmk-1->all_stopped - - - - - diff --git a/doc/Pacemaker_Explained/en-US/images/pcmk-internals.svg b/doc/Pacemaker_Explained/en-US/images/pcmk-internals.svg deleted file mode 100644 index a62548b783..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/pcmk-internals.svg +++ /dev/null @@ -1,1158 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - Pacemaker Internals - - - - - - CRMd - - - STONITHd - - CIB - - PEngine - - LRMd - - Cluster Abstraction Layer - - Corosync - - CCM - or - - Heartbeat - - - - - - - - - - - - - - Linux-HA Project - - Pacemaker Project - - - - diff --git a/doc/Pacemaker_Explained/en-US/images/pcmk-overview.svg b/doc/Pacemaker_Explained/en-US/images/pcmk-overview.svg deleted file mode 100644 index 9fb022db44..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/pcmk-overview.svg +++ /dev/null @@ -1,855 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - Pacemaker 10,000ft - - - - - - - - Cluster Resource Manager - - - - - - Local Resource Manager - - - - - Resource Agents - - Messaging & Membership - - - - - - diff --git a/doc/Pacemaker_Explained/en-US/images/pcmk-stack.svg b/doc/Pacemaker_Explained/en-US/images/pcmk-stack.svg deleted file mode 100644 index eeb8dead73..0000000000 --- a/doc/Pacemaker_Explained/en-US/images/pcmk-stack.svg +++ /dev/null @@ -1,909 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - cLVM2 - - GFS2 - - - OCFS2 - - - - Distributed Lock Manager - - - CoroSync - - - Pacemaker - - Pacemaker Stack - Build Dependency - - - - - - - - - - - Resource Agents - - - - - - Cluster Glue - - - - - - - - - - - diff --git a/doc/crm-flowchart.fig b/doc/crm-flowchart.fig deleted file mode 100644 index 6f778cb646..0000000000 --- a/doc/crm-flowchart.fig +++ /dev/null @@ -1,335 +0,0 @@ -#FIG 3.2 -Landscape -Center -Metric -A4 -59.40 -Single --2 -1200 2 -6 1620 1665 2970 2430 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 2925 2385 2925 1890 1845 1890 1845 2385 2925 2385 -2 4 1 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 2835 2295 1755 2295 1755 1800 2835 1800 2835 2295 -2 4 1 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 2745 2205 1665 2205 1665 1710 2745 1710 2745 2205 -4 1 0 50 0 14 14 0.0000 4 120 360 2340 2115 RAs\001 --6 -6 6255 2520 7785 3375 -6 6345 2610 7695 3285 -4 1 0 50 0 14 14 0.0000 4 135 840 7020 2745 Cluster\001 -4 1 0 50 0 14 14 0.0000 4 135 1320 7020 3000 Information\001 -4 1 0 50 0 14 14 0.0000 4 120 480 7020 3255 Base\001 --6 -6 6255 2520 7785 3375 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 7740 3330 7740 2565 6300 2565 6300 3330 7740 3330 --6 --6 -6 7875 2520 8820 3150 -6 7875 2520 8820 3150 -2 4 0 2 0 7 50 0 -1 0.000 0 0 12 0 0 5 - 8773 3102 8773 2568 7922 2568 7922 3102 8773 3102 --6 -4 1 0 50 0 14 14 0.0000 4 180 720 8348 2762 Policy\001 -4 1 0 50 0 14 14 0.0000 4 180 720 8348 3037 Engine\001 --6 -6 8910 2520 10665 2925 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 10620 2880 10620 2565 8955 2565 8955 2880 10620 2880 -4 1 0 50 0 14 14 0.0000 4 135 1440 9765 2790 Transitioner\001 --6 -6 6480 1620 10305 2025 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 10260 1980 10260 1665 6525 1665 6525 1980 10260 1980 -4 1 0 50 0 14 16 0.0000 4 195 3600 8415 1890 Cluster Resource Manager\001 --6 -6 7875 4725 9450 5130 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 9405 5085 9405 4770 7920 4770 7920 5085 9405 5085 -4 1 0 50 0 14 16 0.0000 4 150 1350 8685 4995 heartbeat\001 --6 -6 8730 4095 9990 4455 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 9945 4410 9945 4140 8775 4140 8775 4410 9945 4410 -4 1 0 50 0 14 14 0.0000 4 180 1080 9360 4320 Messaging\001 --6 -6 7200 3825 8640 4680 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 8595 4635 8595 3870 7245 3870 7245 4635 8595 4635 -4 1 0 50 0 14 14 0.0000 4 120 1080 7920 4050 Concensus\001 -4 1 0 50 0 14 14 0.0000 4 135 840 7920 4305 Cluster\001 -4 1 0 50 0 14 14 0.0000 4 180 1200 7920 4560 Membership\001 --6 -6 12465 1575 13815 2340 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 13770 2295 13770 1800 12690 1800 12690 2295 
13770 2295 -2 4 1 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 13680 2205 12600 2205 12600 1710 13680 1710 13680 2205 -2 4 1 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 13590 2115 12510 2115 12510 1620 13590 1620 13590 2115 -4 1 0 50 0 14 14 0.0000 4 120 360 13185 2025 RAs\001 --6 -6 17325 1530 21150 1935 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 21105 1890 21105 1575 17370 1575 17370 1890 21105 1890 -4 1 0 50 0 14 16 0.0000 4 195 3600 19260 1800 Cluster Resource Manager\001 --6 -6 18720 4635 20295 5040 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 20250 4995 20250 4680 18765 4680 18765 4995 20250 4995 -4 1 0 50 0 14 16 0.0000 4 150 1350 19530 4905 heartbeat\001 --6 -6 19575 4005 20835 4365 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 20790 4320 20790 4050 19620 4050 19620 4320 20790 4320 -4 1 0 50 0 14 14 0.0000 4 180 1080 20205 4230 Messaging\001 --6 -6 18045 3735 19485 4590 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 19440 4545 19440 3780 18090 3780 18090 4545 19440 4545 -4 1 0 50 0 14 14 0.0000 4 120 1080 18765 3960 Concensus\001 -4 1 0 50 0 14 14 0.0000 4 135 840 18765 4215 Cluster\001 -4 1 0 50 0 14 14 0.0000 4 180 1200 18765 4470 Membership\001 --6 -6 18315 2115 19845 2970 -6 18405 2205 19755 2880 -4 1 0 50 0 14 14 0.0000 4 135 840 19080 2340 Cluster\001 -4 1 0 50 0 14 14 0.0000 4 135 1320 19080 2595 Information\001 -4 1 0 50 0 14 14 0.0000 4 120 480 19080 2850 Base\001 --6 -6 18315 2115 19845 2970 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 19800 2925 19800 2160 18360 2160 18360 2925 19800 2925 --6 --6 -6 6750 8370 8010 9090 -4 1 0 50 0 14 14 0.0000 4 120 1080 7380 8505 Concensus\001 -4 1 0 50 0 14 14 0.0000 4 135 840 7380 8760 Cluster\001 -4 1 0 50 0 14 14 0.0000 4 180 1200 7380 9015 Membership\001 --6 -6 6300 9945 7830 10800 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 6345 9990 6345 10755 7785 10755 7785 9990 6345 9990 --6 -6 6390 10035 7740 10710 -4 1 0 50 0 14 14 0.0000 4 135 840 7065 10170 Cluster\001 -4 1 0 50 0 14 14 0.0000 4 135 1320 7065 10425 Information\001 -4 1 0 50 0 14 14 0.0000 4 120 480 7065 10680 Base\001 --6 -6 3240 1755 4905 2250 -2 4 0 2 0 7 50 0 -1 6.000 0 0 15 0 0 5 - 4859 2222 4859 1783 3286 1783 3286 2222 4859 2222 -4 1 0 50 0 14 14 0.0000 4 135 1320 4095 1980 Executioner\001 -4 1 0 50 0 12 14 0.0000 4 165 1080 4095 2160 (STONITH)\001 --6 -6 14085 1710 15750 2205 -2 4 0 2 0 7 50 0 -1 6.000 0 0 15 0 0 5 - 15704 2177 15704 1738 14131 1738 14131 2177 15704 2177 -4 1 0 50 0 14 14 0.0000 4 135 1320 14940 1935 Executioner\001 -4 1 0 50 0 12 14 0.0000 4 165 1080 14940 2115 (STONITH)\001 --6 -6 10485 10710 12150 11205 -2 4 0 2 0 7 50 0 -1 6.000 0 0 15 0 0 5 - 12104 11177 12104 10738 10531 10738 10531 11177 12104 11177 -4 1 0 50 0 14 14 0.0000 4 135 1320 11340 10935 Executioner\001 -4 1 0 50 0 12 14 0.0000 4 165 1080 11340 11115 (STONITH)\001 --6 -6 15300 4320 17415 4995 -2 2 3 2 1 7 50 0 -1 6.000 0 0 -1 0 0 5 - 15345 4365 17370 4365 17370 4950 15345 4950 15345 4365 -4 1 1 50 0 14 16 0.0000 4 150 1950 16380 4590 Adminstrative\001 -4 1 1 50 0 14 16 0.0000 4 180 1050 16380 4875 request\001 --6 -2 1 0 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 1350 6300 21600 6300 -2 1 1 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 1845 6795 21780 6795 -2 1 1 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 8775 6795 8775 5085 -2 1 1 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 19755 4995 19755 6795 -2 1 0 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 19350 6255 19350 4995 -2 1 0 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 8415 6300 8415 5085 -2 1 1 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 6750 7920 6750 6795 -2 1 0 4 0 7 50 0 -1 10.000 0 0 -1 0 0 2 - 6390 7920 6390 6300 -2 4 0 
2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 5040 3195 5040 2880 1575 2880 1575 3195 5040 3195 -2 1 1 2 0 7 50 0 -1 6.000 0 0 11 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 2430 2880 2430 2385 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 5130 1620 5130 3285 1485 3285 1485 1620 5130 1620 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 10710 3465 10710 1530 6165 1530 6165 3465 10710 3465 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 10035 5175 10035 3780 7155 3780 7155 5175 10035 5175 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 15885 3105 15885 2790 12420 2790 12420 3105 15885 3105 -2 1 1 2 0 7 50 0 -1 6.000 0 0 11 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 13275 2790 13275 2295 -2 1 1 2 0 7 50 0 -1 6.000 0 0 11 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 14850 2790 14850 2160 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 15975 1530 15975 3195 12330 3195 12330 1530 15975 1530 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 20880 5085 20880 3690 18000 3690 18000 5085 20880 5085 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 21195 3015 21195 1485 17280 1485 17280 3015 21195 3015 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 21375 5220 21375 900 12150 900 12150 5220 21375 5220 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 10260 9810 10260 10125 13725 10125 13725 9810 10260 9810 -2 1 1 2 0 7 50 0 -1 6.000 0 0 11 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 12870 10125 12870 10620 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 10170 11385 10170 9720 13815 9720 13815 11385 10170 11385 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 5265 7830 5265 9225 8145 9225 8145 7830 5265 7830 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 5355 8595 5355 8865 6525 8865 6525 8595 5355 8595 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 5895 7920 5895 8235 7380 8235 7380 7920 5895 7920 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 6705 8370 6705 9135 8055 9135 8055 8370 6705 8370 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 4725 7650 4725 11970 13950 11970 13950 7650 4725 7650 -2 4 0 2 0 7 50 0 -1 0.000 0 0 11 0 0 5 - 5040 11025 5040 11340 8775 11340 8775 11025 5040 11025 -2 4 2 3 0 7 50 0 -1 2.000 0 0 11 0 0 5 - 4950 11430 4950 9900 8865 9900 8865 11430 4950 11430 -2 1 1 2 0 7 50 0 -1 6.000 0 0 11 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 11295 10125 11295 10755 -2 4 0 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 12375 10620 12375 11115 13455 11115 13455 10620 12375 10620 -2 4 1 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 12465 10710 13545 10710 13545 11205 12465 11205 12465 10710 -2 4 1 2 0 7 50 0 -1 6.000 0 0 11 0 0 5 - 12555 10800 13635 10800 13635 11295 12555 11295 12555 10800 -2 1 1 2 0 7 50 0 -1 6.000 0 0 11 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 4095 2925 4095 2295 -2 2 3 2 1 7 50 0 -1 6.000 0 0 -1 0 0 5 - 12735 4185 14985 4185 14985 4815 12735 4815 12735 4185 -2 4 2 3 2 7 50 0 -1 2.000 0 0 11 0 0 5 - 10935 5445 1305 5445 1305 855 10935 855 10935 5445 -3 2 1 2 4 7 50 0 -1 6.000 0 1 1 3 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 7740 3150 14355 3780 18360 2790 - 0.000 -1.000 0.000 -3 2 1 2 4 7 50 0 -1 6.000 0 1 0 2 - 1 1 1.00 120.00 150.00 - 10620 2745 12420 2970 - 0.000 0.000 -3 2 1 2 4 7 50 0 -1 6.000 0 1 0 2 - 1 1 1.00 120.00 150.00 - 10125 2880 11250 9810 - 0.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 7245 4365 5535 3375 6525 1845 - 0.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 5040 3060 6300 2925 - 0.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 7245 4275 6930 4005 6930 3330 - 0.000 
-1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 6975 2565 7740 2340 8325 2565 - 0.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 8325 2565 9000 2340 9765 2565 - 0.000 -1.000 0.000 -3 2 1 2 4 7 50 0 -1 6.000 0 1 0 4 - 1 1 1.00 120.00 150.00 - 10035 2565 9450 2115 6480 2205 4905 2880 - 0.000 -1.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 18090 4275 16380 3285 17370 1755 - 0.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 15885 2970 18315 2565 - 0.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 18090 4185 17820 3645 18405 2925 - 0.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 8055 8640 9765 9630 8775 11160 - 0.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 1 2 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 10260 9945 7830 10350 - 0.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 8055 8730 8370 9000 7740 9990 - 0.000 -1.000 0.000 -3 2 1 2 4 7 50 0 -1 6.000 0 1 0 5 - 1 1 1.00 120.00 150.00 - 6345 10575 3330 9765 2610 6165 3780 4095 6300 3060 - 0.000 -1.000 -1.000 -1.000 0.000 -3 2 1 2 4 7 50 0 -1 6.000 0 0 1 5 - 1 1 1.00 120.00 150.00 - 6300 10305 4095 8460 3420 5850 4635 3825 6300 3195 - 0.000 -1.000 -1.000 -1.000 0.000 -3 2 1 2 0 7 50 0 -1 6.000 0 1 0 3 - 1 1 1.00 120.00 150.00 - 8055 2610 7785 2475 7380 2565 - 0.000 -1.000 0.000 -3 2 3 2 1 7 50 0 -1 6.000 0 1 0 4 - 1 1 1.00 120.00 150.00 - 16650 4365 17730 1755 13050 1350 10260 1800 - 0.000 -1.000 -1.000 0.000 -3 2 3 2 1 7 50 0 -1 6.000 0 1 1 3 - 1 1 1.00 120.00 150.00 - 1 1 1.00 120.00 150.00 - 14940 4185 17730 1800 18810 2160 - 0.000 -1.000 0.000 -4 1 0 50 0 14 16 0.0000 4 195 3300 3330 3105 Local Resource Manager\001 -4 1 0 50 0 14 14 0.0000 4 120 720 5625 4050 Events\001 -4 1 0 50 0 14 18 0.0000 4 240 4455 5850 1125 Designated Controller Node\001 -4 1 0 50 0 14 16 0.0000 4 195 3300 14175 3015 Local Resource Manager\001 -4 1 0 50 0 14 14 0.0000 4 120 720 16470 3960 Events\001 -4 1 0 50 0 14 18 0.0000 4 240 5280 16650 1215 Any client node in the partition\001 -4 1 0 50 0 14 14 0.0000 4 120 720 9675 8955 Events\001 -4 1 0 50 0 14 16 0.0000 4 150 1350 6660 8145 heartbeat\001 -4 1 0 50 0 14 14 0.0000 4 180 1080 5940 8775 Messaging\001 -4 1 0 50 0 14 16 0.0000 4 195 3600 6885 11250 Cluster Resource Manager\001 -4 1 0 50 0 14 16 0.0000 4 195 3300 12015 10035 Local Resource Manager\001 -4 1 0 50 0 14 14 0.0000 4 120 360 13050 11025 RAs\001 -4 1 0 50 0 14 18 0.0000 4 240 5280 9495 11700 Any client node in the partition\001 -4 2 4 50 0 12 14 0.0000 4 120 720 2970 4920 status\001 -4 2 4 50 0 12 14 0.0000 4 120 840 3105 4680 Gathers\001 -4 0 4 50 0 12 14 0.0000 4 135 3000 10665 5940 Instructs and coordinates\001 -4 0 4 50 0 12 14 0.0000 4 165 1200 3825 4905 Replicates\001 -4 0 4 50 0 12 14 0.0000 4 165 1560 3735 5130 configuration\001 -4 1 1 50 0 14 16 0.0000 4 150 1950 13860 4410 Adminstrative\001 -4 1 1 50 0 14 16 0.0000 4 195 2100 13860 4695 status inquiry\001 diff --git a/doc/executioner.txt b/doc/executioner.txt deleted file mode 100644 index 2ad41e1256..0000000000 --- a/doc/executioner.txt +++ /dev/null @@ -1,115 +0,0 @@ -DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! DRAFT! - -NOTICE: Some ideas in this paper aren't yet well sorted. Some ideas aren't -complete. Some phrasings I'm myself not happy with yet. Some ideas need -further explanation. 
Most of the ideas presented are not final yet. It is -mostly a braindump. - -And did I say yet that this is still a DRAFT!!!!!! ? - - -Title: The Executioner -Author: Lars Marowsky-Brée -Acknowledgements: David Brower, Oracle - Alan Robertson, IBM - - -1. Summary - -Every node runs an instance of the fencing daemon ("executioner"). This daemon -knows which fencing devices are currently reachable and which nodes can be -fenced by them - ie, the current topology of the fencing mechanisms - and will -execute such requests on behalf of the CRM and report success or failure. - -A successful fencing operation shall imply that the target node of the request -can no longer access any shared resources in the cluster, until it has -"properly rejoined the cluster". - - -2. Fencing topology information - -(Note: modelled after the STONITH model by alanr in heartbeat) - -2.1. Static configuration data - -The mechanisms available for fencing need to be configured on each node. - -Provision should be made that this file can be the same on all nodes (to ease -configuration deployment). It shall be made easy to configure a device for a -list of nodes or all nodes. - -TODO: Can this configuration also be stored in the CIB configuration part? -This would collide with the concept that every node is the authoritative source -of information about itself. - - -2.2. Runtime topology - -Every device needs to support a low-latency "ping" operation, which shall -verify whether it can currently be reached from the local node; this shall -preferably not be affected by concurrent access. - -The devices can either autodiscover their targets (ie, like via STONITH -devices), or have to provide means of configuring this list; the list shall -only be queried from the device on explicit request by the CRM. - -The CRM shall be allowed to assume that the same device can fence the same set -of nodes for all clients. - - -3. Interaction with the CRM - -3.1. Policies - -The CRM will be responsible for retrying failed commands; the Executioner -shall only make exactly one attempt. In particular, it shall not retry the request on another -device; it is permissible to retry the command on the same -device if it seems like an intermittent failure. - - -3.2. Queries/Commands issued - -3.2.1. Device reachable - -The Executioner shall verify whether the given device is still reachable by -the local node at this point in time. - -The verification shall be low-latency and low weight and allow for concurrent -access from multiple nodes (if appropriate, ie for network switches). - - -3.2.2. Targets fenceable via device Y - -The node shall contact the device and return the list of nodes which it can -fence. - -The CRM will ensure that no other node in the partition is accessing device Y -right now. - -Results: - 0 Success; list of targets included - 1 Failed; device not reached - 2 Failed; device failed to return list of targets - - -3.2.3. Fencing request to fence node X via device Y - -This is a blocking, synchronous call. The CRM will ensure that no other node -in the partition is accessing device Y right now. - -(This is an issue for certain network power switches) - -The result codes need to distinguish between: - - 0 Fencing request succeeded - 1 Failed: device could not be reached, potential network issue - 2 Failed: device tried, but failed to acknowledge success - 3 Failed: internal device failure - -TODO: So much differentiation really necessary?
Yes, it can help identify - quickly whether it is sensible to retry the fencing request via - another node to the same device, or whether the next fencing device - should be tried immediately; maybe that is overkill. - - - diff --git a/doc/msg-schema.txt b/doc/msg-schema.txt deleted file mode 100644 index f07c6aef9a..0000000000 --- a/doc/msg-schema.txt +++ /dev/null @@ -1,166 +0,0 @@ -Background -################## -First of all, go look at the diagram (comms.gif), read this, then -look at the diagram again. - -Next, some terminology... -Here I will use CRM to refer to the light blue section. That is, -the entire collection of processes/daemons/modules on a node that, -as a whole, manage resources in the cluster. CRMd refers to one -of the dark blue boxes. It is the "master subsystem" if you like. -Its role is to co-ordinate the actions of all the other pieces of -the puzzle, including those on other nodes. - -Key points from the diagram: -- All communications with the CRM are done with Heartbeat messages - routed through the CRMd. These messages contain a text - representation of an XML document, the schema of which is outlined - at the end of this document. -- All communications internal to the CRM (ie. between its subsystems) - are performed with IPC messages. Again all messages are routed via - the CRMd and contain the same XML documents as Heartbeat messages. -- All admin clients (eventually) end up sending Heartbeat messages - and are thus subject to the existing HA client security. -- The RPC layer allows the cluster to be controlled from non-member - hosts (subject to RPC security which is available for free). -- The option of synchronous or asynchronous RPC calls will be provided. - This will probably be in the form of a flag sent as part of the - function call. - -Advantages: -- The only source of "requests" is the CRMd which means it *never* has - to forward on "request" messages for any of its sub-systems. This is - useful for the security of the system (see security.txt). -- Potentially, most CRM<-->CRM communications can be replaced with RPC - calls. -- We are able to re-use existing security mechanisms (IPC, HA, RPC, - unix_auth via RPC) to protect the system. - -Message scenarios: -################## -There are really only 3 messaging scenarios in this system (excluding -broadcast vs. point-to-point). Again this is nice as it keeps down the -number of "special cases". - -1) Sub-system <--(IPC)--> CRM <--(IPC)--> Sub-system -2) Sub-system <--(IPC)--> Local CRM <--(Heartbeat)--> Remote CRM <--(IPC)--> Sub-system -3) Admin Client <--(Heartbeat Broadcast)--> Remote CRM <--(IPC)--> Sub-system - -Message examples: -################## -1.1) the DC telling the local LRM to start a resource -1.2) the LRM asking the CIB about a resource - -2.1) the DC telling a remote LRM to start a resource -2.2) the DC asking (all) the CIB(s) to provide their view of the world - -3.1) an admin request to add/remove/modify a resource -3.2) an admin request to force a failover of a resource or a -recomputation of the resource dependencies. - -Message Notes: -################## -Messages may be sent to the CRM from local sub-systems via IPC or from -other HA clients via Heartbeat. It is then the responsibility of the -CRM to unpack the message and pass it on to the correct sub-system. If -the destination sub-system is the DC and the DC is not running on the -current node, the message is discarded without error.
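(As a rough illustration only - made-up names, not actual CRM code - the dispatch rule above boils down to something like:)

    # Sketch of the routing rule described above (hypothetical names).
    # A CRMd unpacks each incoming crm_message and hands it to the
    # addressed sub-system; a message addressed to the DC is silently
    # dropped on any node that is not currently the DC.
    def route_message(msg, local_subsystems, we_are_dc):
        dest = msg["dest_subsystem"]      # e.g. "cib", "lrm", "dc"
        if dest == "dc" and not we_are_dc:
            return None                   # discarded without error
        return local_subsystems[dest].deliver(msg)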
- -Where the DC receives a message from another node, it will also keep -track of the sending host and the reference number so that it can direct -the replies appropriately. The exception to this is where the message -is from the DC. - -Messages to the DC are *always* sent as broadcast messages and the DC -*must always* acknowledge the message with either the results of the -message or a "thankyou" message. The reason for this is that the DC may -change or a DC election may be in progress. The implication of this is -that the sender should always set a timer and resend dc_messages if they -have not been acknowledged. The DC will be able to detect duplicates by -examining the destination sub-system and the reference number and we -will rely on HA to ensure the delivery of DC responses. - -All messages are full crm_messages. I toyed with only sending the *_request -or *_response piece of the message to and/or from the relevant sub-systems, -but it just got messy. This way, the routing role of the CRM is much easier. -And easier equals lower complexity, which means fewer bugs, which is good for -everyone. - -Schema Notes: -################### - -Key Attributes -=============== - -reference: provides the ability to track which request a response - is in relation to and where the local CRM should send it. -*_filter: allow the operation to be limited to a particular type, - id and/or priority -timeout: allows the receiver to know how long the sender is - expecting the task to take so we can act and report back - accordingly. - - -Attribute values -================= -Where the list ends with |... , the complete list of possibilities will be -fleshed out at a later date. - -Message Schema: -################### -[XML DTD for the message schema; only stray fragments survive, e.g. the attribute declarations filter_type? #CDATA and filter_id? #CDATA.]
diff --git a/doc/Clusters_from_Scratch/en-US/images/Console.png b/doc/shared/en-US/images/Console.png similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Console.png rename to doc/shared/en-US/images/Console.png diff --git a/doc/Clusters_from_Scratch/en-US/images/Installer.png b/doc/shared/en-US/images/Installer.png similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Installer.png rename to doc/shared/en-US/images/Installer.png diff --git a/doc/Clusters_from_Scratch/en-US/images/Network.png b/doc/shared/en-US/images/Network.png similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Network.png rename to doc/shared/en-US/images/Network.png diff --git a/doc/Clusters_from_Scratch/en-US/images/Policy-Engine-big.dot b/doc/shared/en-US/images/Policy-Engine-big.dot similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Policy-Engine-big.dot rename to doc/shared/en-US/images/Policy-Engine-big.dot diff --git a/doc/Clusters_from_Scratch/en-US/images/Policy-Engine-big.svg b/doc/shared/en-US/images/Policy-Engine-big.svg similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Policy-Engine-big.svg rename to doc/shared/en-US/images/Policy-Engine-big.svg diff --git a/doc/Clusters_from_Scratch/en-US/images/Policy-Engine-small.dot b/doc/shared/en-US/images/Policy-Engine-small.dot similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Policy-Engine-small.dot rename to doc/shared/en-US/images/Policy-Engine-small.dot diff --git a/doc/Clusters_from_Scratch/en-US/images/Policy-Engine-small.svg b/doc/shared/en-US/images/Policy-Engine-small.svg similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Policy-Engine-small.svg rename to doc/shared/en-US/images/Policy-Engine-small.svg diff --git a/doc/Clusters_from_Scratch/en-US/images/Welcome.png b/doc/shared/en-US/images/Welcome.png similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/Welcome.png rename to doc/shared/en-US/images/Welcome.png diff --git a/doc/Pacemaker_Explained/en-US/images/pcmk-active-active.svg b/doc/shared/en-US/images/pcmk-active-active.svg similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/pcmk-active-active.svg rename to doc/shared/en-US/images/pcmk-active-active.svg diff --git a/doc/Pacemaker_Explained/en-US/images/pcmk-active-passive.svg b/doc/shared/en-US/images/pcmk-active-passive.svg similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/pcmk-active-passive.svg rename to doc/shared/en-US/images/pcmk-active-passive.svg diff --git a/doc/Clusters_from_Scratch/en-US/images/pcmk-internals.svg b/doc/shared/en-US/images/pcmk-internals.svg similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/pcmk-internals.svg rename to doc/shared/en-US/images/pcmk-internals.svg diff --git a/doc/Clusters_from_Scratch/en-US/images/pcmk-overview.svg b/doc/shared/en-US/images/pcmk-overview.svg similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/pcmk-overview.svg rename to doc/shared/en-US/images/pcmk-overview.svg diff --git a/doc/Pacemaker_Explained/en-US/images/pcmk-shared-failover.svg b/doc/shared/en-US/images/pcmk-shared-failover.svg similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/pcmk-shared-failover.svg rename to doc/shared/en-US/images/pcmk-shared-failover.svg diff --git a/doc/Clusters_from_Scratch/en-US/images/pcmk-stack.svg
b/doc/shared/en-US/images/pcmk-stack.svg similarity index 100% rename from doc/Clusters_from_Scratch/en-US/images/pcmk-stack.svg rename to doc/shared/en-US/images/pcmk-stack.svg diff --git a/doc/Pacemaker_Explained/en-US/images/resource-set.png b/doc/shared/en-US/images/resource-set.png similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/resource-set.png rename to doc/shared/en-US/images/resource-set.png diff --git a/doc/Pacemaker_Explained/en-US/images/three-sets-complex.png b/doc/shared/en-US/images/three-sets-complex.png similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/three-sets-complex.png rename to doc/shared/en-US/images/three-sets-complex.png diff --git a/doc/Pacemaker_Explained/en-US/images/three-sets.png b/doc/shared/en-US/images/three-sets.png similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/three-sets.png rename to doc/shared/en-US/images/three-sets.png diff --git a/doc/Pacemaker_Explained/en-US/images/two-sets.png b/doc/shared/en-US/images/two-sets.png similarity index 100% rename from doc/Pacemaker_Explained/en-US/images/two-sets.png rename to doc/shared/en-US/images/two-sets.png diff --git a/fencing/Makefile.am b/fencing/Makefile.am index 7804419722..d3270d8857 100644 --- a/fencing/Makefile.am +++ b/fencing/Makefile.am @@ -1,67 +1,67 @@ # Author: Sun Jiang Dong # Copyright (c) 2004 International Business Machines # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# include $(top_srcdir)/Makefile.common ## binary progs testdir = $(datadir)/$(PACKAGE)/tests/fencing test_SCRIPTS = regression.py halibdir = $(CRM_DAEMON_DIR) halib_PROGRAMS = stonithd stonith-test sbin_PROGRAMS = stonith_admin sbin_SCRIPTS = fence_legacy noinst_HEADERS = internal.h if BUILD_XML_HELP man7_MANS = stonithd.7 endif stonith_test_SOURCES = test.c stonith_test_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(CRYPTOLIB) $(CLUSTERLIBS) stonith_admin_SOURCES = admin.c stonith_admin_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(CRYPTOLIB) $(CLUSTERLIBS) stonithd_CPPFLAGS = -I$(top_srcdir)/pengine $(AM_CPPFLAGS) stonithd_YFLAGS = -d stonithd_CFLAGS = $(CFLAGS_HARDENED_EXE) stonithd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) stonithd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/pengine/libpengine.la \ $(CRYPTOLIB) $(CLUSTERLIBS) stonithd_SOURCES = main.c commands.c remote.c -CLEANFILES = $(man7_MANS) $(man8_MANS) +CLEANFILES = regression.py $(man7_MANS) $(man8_MANS) diff --git a/fencing/regression.py.in b/fencing/regression.py.in index 8e4cf57693..0541622124 100644 --- a/fencing/regression.py.in +++ b/fencing/regression.py.in @@ -1,1427 +1,1427 @@ -#!/usr/bin/python +#!@PYTHON@ """ Regression tests for Pacemaker's stonithd """ # Pacemaker targets compatibility with Python 2.7 and 3.2+ from __future__ import print_function, unicode_literals, absolute_import, division __copyright__ = "Copyright (C) 2012-2018 Andrew Beekhof " __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import sys import subprocess import shlex import time FENCE_DUMMY = "@datadir@/@PACKAGE@/tests/cts/fence_dummy" # These values must be kept in sync with include/crm/crm.h class CrmExit: OK = 0 ERROR = 1 INVALID_PARAM = 2 UNIMPLEMENT_FEATURE = 3 INSUFFICIENT_PRIV = 4 NOT_INSTALLED = 5 NOT_CONFIGURED = 6 NOT_RUNNING = 7 USAGE = 64 DATAERR = 65 NOINPUT = 66 NOUSER = 67 NOHOST = 68 UNAVAILABLE = 69 SOFTWARE = 70 OSERR = 71 OSFILE = 72 CANTCREAT = 73 IOERR = 74 TEMPFAIL = 75 PROTOCOL = 76 NOPERM = 77 CONFIG = 78 FATAL = 100 PANIC = 101 DISCONNECT = 102 SOLO = 103 DIGEST = 104 NOSUCH = 105 QUORUM = 106 UNSAFE = 107 EXISTS = 108 MULTIPLE = 109 OLD = 110 TIMEOUT = 124 MAX = 255 def pipe_output(pipes, stdout=True, stderr=False): """ Wrapper to get text output from pipes regardless of Python version """ output = "" pipe_outputs = pipes.communicate() if sys.version_info < (3,): if stdout: output = output + pipe_outputs[0] if stderr: output = output + pipe_outputs[1] else: if stdout: output = output + pipe_outputs[0].decode(sys.stdout.encoding) if stderr: output = output + pipe_outputs[1].decode(sys.stderr.encoding) return output def output_from_command(command): """ Execute command and return its standard output """ test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE) test.wait() return pipe_output(test).split("\n") def localname(): """ Return the uname of the local host """ our_uname = output_from_command("uname -n") if our_uname: our_uname = our_uname[0] else: our_uname = "localhost" return our_uname def killall(process): 
""" Kill all instances of a process """ cmd = shlex.split("killall -9 -q %s" % process) test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() class TestError(Exception): """ Base class for exceptions in this module """ pass class ExitCodeError(TestError): """ Exception raised when command exit status is unexpected """ def __init__(self, exit_code): self.exit_code = exit_code def __str__(self): return repr(self.exit_code) class OutputNotFoundError(TestError): """ Exception raised when command output does not contain wanted string """ def __init__(self, exit_code): self.output = output def __str__(self): return repr(self.output) class OutputFoundError(TestError): """ Exception raised when command output contains unwanted string """ def __init__(self, exit_code): self.output = output def __str__(self): return repr(self.output) class Test(object): """ Executor for a single test """ def __init__(self, name, description, verbose=0, with_cpg=0): self.name = name self.description = description self.cmds = [] self.verbose = verbose self.result_txt = "" self.cmd_tool_output = "" self.result_exitcode = CrmExit.OK if with_cpg: self.stonith_options = "-c" self.enable_corosync = 1 else: self.stonith_options = "-s" self.enable_corosync = 0 self.stonith_process = None self.stonith_output = "" self.stonith_patterns = [] self.negative_stonith_patterns = [] self.executed = 0 def __new_cmd(self, cmd, args, exitcode, stdout_match="", no_wait=0, stdout_negative_match="", kill=None): """ Add a command to be executed as part of this test """ self.cmds.append( { "cmd" : cmd, "kill" : kill, "args" : args, "expected_exitcode" : exitcode, "stdout_match" : stdout_match, "stdout_negative_match" : stdout_negative_match, "no_wait" : no_wait, } ) def start_environment(self): """ Prepare the host for executing a test """ # Make sure we are in full control killall("pacemakerd") killall("stonithd") if self.verbose: self.stonith_options = self.stonith_options + " -V" print("Starting stonithd with %s" % self.stonith_options) if os.path.exists("/tmp/stonith-regression.log"): os.remove('/tmp/stonith-regression.log') cmd = "@CRM_DAEMON_DIR@/stonithd %s -l /tmp/stonith-regression.log" % self.stonith_options self.stonith_process = subprocess.Popen(shlex.split(cmd)) time.sleep(1) def clean_environment(self): """ Clean up the host after executing a test """ if self.stonith_process: self.stonith_process.terminate() self.stonith_process.wait() self.stonith_output = "" self.stonith_process = None logfile = io.open('/tmp/stonith-regression.log', 'rt') for line in logfile.readlines(): self.stonith_output = self.stonith_output + line if self.verbose: print("Daemon Output Start") print(self.stonith_output) print("Daemon Output End") os.remove('/tmp/stonith-regression.log') def add_stonith_log_pattern(self, pattern): """ Add a log pattern to expect from this test """ self.stonith_patterns.append(pattern) def add_stonith_neg_log_pattern(self, pattern): """ Add a log pattern that should not occur with this test """ self.negative_stonith_patterns.append(pattern) def add_cmd(self, cmd, args): """ Add a simple command to be executed as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, "") def add_cmd_no_wait(self, cmd, args): """ Add a simple command to be executed (without waiting) as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, "", 1) def add_cmd_check_stdout(self, cmd, args, match, no_match=""): """ Add a simple command with expected output to be executed as part of this test """ self.__new_cmd(cmd, args, 
CrmExit.OK, match, 0, no_match) def add_expected_fail_cmd(self, cmd, args, exitcode=CrmExit.ERROR): """ Add a command to be executed as part of this test and expected to fail """ self.__new_cmd(cmd, args, exitcode, "") def get_exitcode(self): """ Return the exit status of the last test execution """ return self.result_exitcode def print_result(self, filler): """ Print the result of the last test execution """ print("%s%s" % (filler, self.result_txt)) def run_cmd(self, args): """ Execute a command as part of this test """ cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print("\n\nRunning: "+" ".join(cmd)) test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if args['kill']: if self.verbose: print("Also running: "+args['kill']) subprocess.Popen(shlex.split(args['kill'])) if args['no_wait'] == 0: test.wait() else: return CrmExit.OK output = pipe_output(test, stderr=True) if self.verbose: print(output) if test.returncode != args['expected_exitcode']: raise ExitCodeError(test.returncode) if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: raise OutputNotFoundError(output) if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: raise OutputFoundError(output) def count_negative_matches(self, outline): """ Return 1 if a line matches patterns that shouldn't have occurred """ count = 0 for line in self.negative_stonith_patterns: if outline.count(line): count = 1 if self.verbose: print("This pattern should not have matched = '%s'" % (line)) return count def match_stonith_patterns(self): """ Check test output for expected patterns """ negative_matches = 0 cur = 0 pats = self.stonith_patterns total_patterns = len(self.stonith_patterns) if len(self.stonith_patterns) == 0 and len(self.negative_stonith_patterns) == 0: return for line in self.stonith_output.split("\n"): negative_matches = negative_matches + self.count_negative_matches(line) if len(pats) == 0: continue cur = -1 for pat in pats: cur = cur + 1 if line.count(pats[cur]): del pats[cur] break if len(pats) > 0 or negative_matches: if self.verbose: for pat in pats: print("Pattern Not Matched = '%s'" % pat) msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." self.result_txt = msg % (self.name, len(pats), total_patterns, negative_matches) self.result_exitcode = CrmExit.ERROR def set_error(self, step, cmd): """ Record failure of this test """ msg = "FAILURE - '%s' failed at step %d. Command: %s %s" self.result_txt = msg % (self.name, step, cmd['cmd'], cmd['args']) self.result_exitcode = CrmExit.ERROR def run(self): """ Execute this test.
""" res = 0 i = 1 self.start_environment() if self.verbose: print("\n--- START TEST - %s" % self.name) self.result_txt = "SUCCESS - '%s'" % (self.name) self.result_exitcode = CrmExit.OK for cmd in self.cmds: try: self.run_cmd(cmd) except ExitCodeError as e: print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode'])) self.set_error(i, cmd); break except OutputNotFoundError as e: print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e)) self.set_error(i, cmd); break except OutputFoundError as e: print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e)) self.set_error(i, cmd); break if self.verbose: print("Step %d SUCCESS" % (i)) i = i + 1 self.clean_environment() if self.result_exitcode == CrmExit.OK: self.match_stonith_patterns() print(self.result_txt) if self.verbose: print("--- END TEST - %s\n" % self.name) self.executed = 1 return res class Tests(object): """ Collection of all fencing regression tests """ def __init__(self, verbose=0): self.tests = [] self.verbose = verbose self.autogen_corosync_cfg = 0 if not os.path.exists("/etc/corosync/corosync.conf"): self.autogen_corosync_cfg = 1 def new_test(self, name, description, with_cpg=0): """ Create a named test """ test = Test(name, description, self.verbose, with_cpg) self.tests.append(test) return test def print_list(self): """ List all registered tests """ print("\n==== %d TESTS FOUND ====" % (len(self.tests))) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self.tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def start_corosync(self): """ Start the corosync process """ if self.verbose: print("Starting corosync") test = subprocess.Popen("corosync", stdout=subprocess.PIPE) test.wait() time.sleep(10) def run_single(self, name): """ Run a single named test """ for test in self.tests: if test.name == name: test.run() break def run_tests_matching(self, pattern): """ Run all tests whose name matches a pattern """ for test in self.tests: if test.name.count(pattern) != 0: test.run() def run_cpg_only(self): """ Run all corosync-enabled tests """ for test in self.tests: if test.enable_corosync: test.run() def run_no_cpg(self): """ Run all standalone tests """ for test in self.tests: if not test.enable_corosync: test.run() def run_tests(self): """ Run all tests """ for test in self.tests: test.run() def exit(self): """ Exit (with error status code if any test failed) """ for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: sys.exit(CrmExit.ERROR) sys.exit(CrmExit.OK) def print_results(self): """ Print summary of results of executed tests """ failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) def build_api_sanity_tests(self): """ Register tests to verify basic API usage """ verbose_arg = "" if self.verbose: verbose_arg = "-V" test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.") test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-t %s" % (verbose_arg)) test = self.new_test("cpg_low_level_api_test", "Sanity 
test client api using mainloop and cpg.", 1) test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-m %s" % (verbose_arg)) def build_custom_timeout_tests(self): """ Register tests to verify custom timeout usage """ # custom timeout without topology test = self.new_test("cpg_custom_timeout_1", "Verify per device timeouts work as expected without using topology.", 1) test.add_cmd('stonith_admin', '-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', '-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') test.add_cmd('stonith_admin', '-R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4"') test.add_cmd("stonith_admin", "-F node3 -t 2") # timeout is 2+1+4 = 7 test.add_stonith_log_pattern("Total timeout set to 7") # custom timeout _WITH_ topology test = self.new_test("cpg_custom_timeout_2", "Verify per device timeouts work as expected _WITH_ topology.", 1) test.add_cmd('stonith_admin', '-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', '-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') test.add_cmd('stonith_admin', '-R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4000"') test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 3 -v false2") test.add_cmd("stonith_admin", "-F node3 -t 2") # timeout is 2+1+4000 = 4003 test.add_stonith_log_pattern("Total timeout set to 4003") def build_fence_merge_tests(self): """ Register tests to verify when fence operations should be merged """ ### Simple test that overlapping fencing operations get merged test = self.new_test("cpg_custom_merge_single", "Verify overlapping identical fencing operations are merged, no fencing levels used.", 1) test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd("stonith_admin", "-F node3 -t 10") ### one merger will happen test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") ### Test that multiple mergers occur test = self.new_test("cpg_custom_merge_multiple", "Verify multiple overlapping identical fencing operations are merged", 1) test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"delay=2\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd("stonith_admin", "-F node3 -t 10") ### 4 mergers should occur test.add_stonith_log_pattern("Merging stonith action off for node 
node3 originating from client") test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") ### Test that multiple mergers occur with topologies used test = self.new_test("cpg_custom_merge_with_topology", "Verify multiple overlapping identical fencing operations are merged with fencing levels.", 1) test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") test.add_cmd("stonith_admin", "-F node3 -t 10") ### 4 mergers should occur test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") test.add_stonith_log_pattern("Operation off of node3 by") def build_fence_no_merge_tests(self): """ Register tests to verify when fence operations should not be merged """ test = self.new_test("cpg_custom_no_merge", "Verify differing fencing operations are not merged", 1) test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3 node2\" ") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") test.add_cmd_no_wait("stonith_admin", "-F node2 -t 10") test.add_cmd("stonith_admin", "-F node3 -t 10") test.add_stonith_neg_log_pattern("Merging stonith action off for node node3 originating from client") def build_standalone_tests(self): """ Register a grab bag of tests that can be executed in standalone or corosync mode """ test_types = [ { "prefix" : "standalone", "use_cpg" 
: 0, }, { "prefix" : "cpg", "use_cpg" : 1, }, ] # test what happens when all devices time out for test_type in test_types: test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"], "Verify that when all devices time out, a fencing failure is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") if test_type["use_cpg"] == 1: test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", CrmExit.TIMEOUT) test.add_stonith_log_pattern("Total timeout set to 6") else: test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", CrmExit.ERROR) test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: ") test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: ") test.add_stonith_log_pattern("for host 'node3' with device 'false3' returned: ") # test what happens when multiple devices can fence a node, but the first device fails. for test_type in test_types: test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"], "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-F node3 -t 2") if test_type["use_cpg"] == 1: test.add_stonith_log_pattern("Total timeout set to 6") # simple topology test for one device for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_simple" % test_type["prefix"], "Verify all fencing devices at a level are used.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("Total timeout set to 2") test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") # add topology, delete topology, verify fencing still works for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_add_remove" % test_type["prefix"], "Verify fencing occurs after all topology levels are removed", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") test.add_cmd("stonith_admin", "-d node3 -i 1") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("Total timeout set to 2") test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") # test what happens when the first fencing level has multiple devices.
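# (Aside: in the next test the 20s fence timeout is applied per device, so
# with both 'false' and 'true' eligible the expected log is "Total timeout
# set to 40" - compare "Total timeout set to 6" above for three devices
# with -t 2.)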
for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_device_fails" % test_type["prefix"], "Verify that if one device in a level fails, the other is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R false -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true") test.add_cmd("stonith_admin", "-F node3 -t 20") test.add_stonith_log_pattern("Total timeout set to 40") test.add_stonith_log_pattern("for host 'node3' with device 'false' returned: -201") test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") # test what happens when the first fencing level fails. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], "Verify that if one level fails, the next level is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "-F node3 -t 3") test.add_stonith_log_pattern("Total timeout set to 18") test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -201") test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -201") test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") # test what happens when the first fencing level had devices that no one has registered for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_missing_devices" % test_type["prefix"], "Verify topology can continue with missing devices.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2")
test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "-F node3 -t 2") # Test what happens if multiple fencing levels are defined, and then the first one is removed. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_level_removal" % test_type["prefix"], "Verify level removal works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") # Now remove level 2, verify none of the devices in level two are hit. test.add_cmd("stonith_admin", "-d node3 -i 2") test.add_cmd("stonith_admin", "-F node3 -t 20") test.add_stonith_log_pattern("Total timeout set to 8") test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -201") test.add_stonith_neg_log_pattern("for host 'node3' with device 'false2' returned: ") test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") # Test targeting a topology level by node name pattern. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_level_pattern" % test_type["prefix"], "Verify targeting topology by node name pattern works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", """-R true -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node1 node2 node3" """) test.add_cmd("stonith_admin", """-r '@node.*' -i 1 -v true""") test.add_cmd("stonith_admin", "-F node3 -t 2") test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") # test allowing commas and semicolons as delimiters in pcmk_host_list for test_type in test_types: test = self.new_test("%s_host_list_delimiters" % test_type["prefix"], "Verify commas and semicolons can be used as pcmk_host_list delimiters", test_type["use_cpg"]) test.add_cmd("stonith_admin", """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node1,node2,node3" """) test.add_cmd("stonith_admin", """-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=pcmk1;pcmk2;pcmk3" """) test.add_cmd("stonith_admin", "stonith_admin -F node2 -t 2") test.add_cmd("stonith_admin", "stonith_admin -F pcmk3 -t 2") test.add_stonith_log_pattern("for host 'node2' with device 'true1' returned: 0") test.add_stonith_log_pattern("for host 'pcmk3' with device 'true2' returned: 0") # test the stonith builds the correct list of devices that can fence a node. 
for test_type in test_types: test = self.new_test("%s_list_devices" % test_type["prefix"], "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true2", "true1") test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true3", "true1") # simple test of device monitor for test_type in test_types: test = self.new_test("%s_monitor" % test_type["prefix"], "Verify device is reachable", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-Q true1") test.add_cmd("stonith_admin", "-Q false1") test.add_expected_fail_cmd("stonith_admin", "-Q true2", CrmExit.ERROR) # Verify monitor occurs for duration of timeout period on failure for test_type in test_types: test = self.new_test("%s_monitor_timeout" % test_type["prefix"], "Verify monitor uses duration of timeout period given.", test_type["use_cpg"]) test.add_cmd("stonith_admin", '-R true1 -a fence_dummy -o "mode=fail" -o "monitor_mode=fail" -o "pcmk_host_list=node3"') test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 5", CrmExit.ERROR) test.add_stonith_log_pattern("Attempt 2 to execute") # Verify monitor occurs for duration of timeout period on failure, but stops at max retries for test_type in test_types: test = self.new_test("%s_monitor_timeout_max_retries" % test_type["prefix"], "Verify monitor retries until max retry value or timeout is hit.", test_type["use_cpg"]) test.add_cmd("stonith_admin", '-R true1 -a fence_dummy -o "mode=fail" -o "monitor_mode=fail" -o "pcmk_host_list=node3"') test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 15", CrmExit.ERROR) test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (list) the maximum number of times") # simple register test for test_type in test_types: test = self.new_test("%s_register" % test_type["prefix"], "Verify devices can be registered and un-registered", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-Q true1") test.add_cmd("stonith_admin", "-D true1") test.add_expected_fail_cmd("stonith_admin", "-Q true1", CrmExit.ERROR) # simple reboot test for test_type in test_types: test = self.new_test("%s_reboot" % test_type["prefix"], "Verify devices can be rebooted", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-B node3 -t 2") test.add_cmd("stonith_admin", "-D true1") test.add_expected_fail_cmd("stonith_admin", "-Q true1", CrmExit.ERROR) # test fencing history. 
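# (Aside: the fence-history test below first runs "-F node3" and then checks
# that "-H node3" reports "was able to turn off node node3".)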
for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_fence_history" % test_type["prefix"], "Verify last fencing operation is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "-F node3 -t 2 -V") test.add_cmd_check_stdout("stonith_admin", "-H node3", "was able to turn off node node3", "") # simple test of dynamic list query for test_type in test_types: test = self.new_test("%s_dynamic_list_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found") # fence using dynamic list query for test_type in test_types: test = self.new_test("%s_fence_dynamic_list_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "-F fake_port_1 -t 5 -V") # simple test of query using status action for test_type in test_types: test = self.new_test("%s_status_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found") # test what happens when no reboot action is advertised for test_type in test_types: test = self.new_test("%s_no_reboot_support" % test_type["prefix"], "Verify reboot action defaults to off when no reboot action is advertised by agent.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_no_reboot -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-B node1 -t 5 -V") test.add_stonith_log_pattern("does not advertise support for 'reboot', performing 'off'") test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)") # make sure reboot is used when reboot action is advertised for test_type in test_types: test = self.new_test("%s_with_reboot_support" % test_type["prefix"], "Verify reboot action can be used when metadata advertises it.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "-B node1 -t 5 -V") test.add_stonith_neg_log_pattern("does not advertise support for 'reboot', performing 'off'") test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)") def build_nodeid_tests(self): """ Register tests that use a corosync node id """ our_uname = localname() ### verify nodeid is supplied when 
nodeid is in the metadata parameters test = self.new_test("cpg_supply_nodeid", "Verify nodeid is given when fence agent has nodeid as parameter", 1) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) test.add_stonith_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) ### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters test = self.new_test("cpg_do_not_supply_nodeid", "Verify nodeid is _NOT_ given when fence agent does not have nodeid as parameter", 1) # use a host name that won't be in corosync.conf test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=regr-test\"") test.add_cmd("stonith_admin", "-F regr-test -t 3") test.add_stonith_neg_log_pattern("For stonith action (off) for victim regr-test, adding nodeid") ### verify nodeid use doesn't explode standalone mode test = self.new_test("standalone_do_not_supply_nodeid", "Verify nodeid in metadata parameter list doesn't kill standalone mode", 0) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) test.add_stonith_neg_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) def build_unfence_tests(self): """ Register tests that verify unfencing """ our_uname = localname() ### verify unfencing using automatic unfencing test = self.new_test("cpg_unfence_required_1", "Verify unfencing is required on all devices when automatic=true in the agent's metadata", 1) test.add_cmd('stonith_admin', '-R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd('stonith_admin', '-R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) # both devices should be executed test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)") test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)") ### verify unfencing using automatic unfencing fails if any of the required agents fail test = self.new_test("cpg_unfence_required_2", "Verify unfencing fails if any required device fails when automatic=true in the agent's metadata", 1) test.add_cmd('stonith_admin', '-R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd('stonith_admin', '-R true2 -a fence_dummy_auto_unfence -o "mode=fail" -o "pcmk_host_list=%s"' % (our_uname)) test.add_expected_fail_cmd("stonith_admin", "-U %s -t 6" % (our_uname), CrmExit.ERROR) ### verify unfencing using automatic devices with topology test = self.new_test("cpg_unfence_required_3", "Verify unfencing is required on all devices even when at different topology levels", 1) test.add_cmd('stonith_admin', '-R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)") test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)") ### verify unfencing using automatic devices with topology test =
self.new_test("cpg_unfence_required_4", "Verify all required devices are executed even with topology levels fail.", 1) test.add_cmd('stonith_admin', '-R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R true3 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R true4 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R false3 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '-R false4 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 1 -v false1" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 2 -v false2" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 2 -v false3" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 2 -v true3" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 3 -v false4" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 4 -v true4" % (our_uname)) test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)") test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)") test.add_stonith_log_pattern("with device 'true3' returned: 0 (OK)") test.add_stonith_log_pattern("with device 'true4' returned: 0 (OK)") def build_unfence_on_target_tests(self): """ Register tests that verify unfencing that runs on the target """ our_uname = localname() ### verify unfencing using on_target device test = self.new_test("cpg_unfence_on_target_1", "Verify unfencing with on_target = true", 1) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("(on) to be executed on the target node") ### verify failure of unfencing using on_target device test = self.new_test("cpg_unfence_on_target_2", "Verify failure unfencing with on_target = true", 1) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake_1234\"" % (our_uname)) test.add_expected_fail_cmd("stonith_admin", "-U node_fake_1234 -t 3", CrmExit.ERROR) test.add_stonith_log_pattern("(on) to be executed on the target node") ### verify unfencing using on_target device with topology test = self.new_test("cpg_unfence_on_target_3", "Verify unfencing with on_target = true using topology", 1) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("(on) 
to be executed on the target node") ### verify unfencing using on_target device with topology fails when victim node doesn't exist test = self.new_test("cpg_unfence_on_target_4", "Verify unfencing failure with on_target = true using topology", 1) test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1") test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true2") test.add_expected_fail_cmd("stonith_admin", "-U node_fake -t 3", CrmExit.ERROR) test.add_stonith_log_pattern("(on) to be executed on the target node") def build_remap_tests(self): """ Register tests that verify remapping of reboots to off-on """ test = self.new_test("cpg_remap_simple", "Verify sequential topology reboot is remapped to all-off-then-all-on", 1) test.add_cmd("stonith_admin", """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ """-o "pcmk_off_timeout=1" -o "pcmk_reboot_timeout=10" """) test.add_cmd("stonith_admin", """-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ """-o "pcmk_off_timeout=2" -o "pcmk_reboot_timeout=20" """) test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "-B node_fake -t 5") test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) test.add_stonith_log_pattern("Total timeout set to 3 for peer's fencing of node_fake") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true1'") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true2'") test.add_stonith_log_pattern("Remapped off of node_fake complete, remapping to on") # fence_dummy sets "on" as an on_target action test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) for node_fake") test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) for node_fake") test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") test = self.new_test("cpg_remap_automatic", "Verify remapped topology reboot skips automatic 'on'", 1) test.add_cmd("stonith_admin", """-R true1 -a fence_dummy_auto_unfence """ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """-R true2 -a fence_dummy_auto_unfence """ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "-B node_fake -t 5") test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true1'") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true2'") test.add_stonith_log_pattern("Remapped off of node_fake complete, remapping to on") test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") test.add_stonith_neg_log_pattern("perform op 'node_fake on' with") test.add_stonith_neg_log_pattern("'on' failure") test = self.new_test("cpg_remap_complex_1", "Verify remapped topology reboot in second level works if non-remapped first level fails", 1) test.add_cmd("stonith_admin", """-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", 
"""-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "-r node_fake -i 1 -v false1") test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true1 -v true2") test.add_cmd("stonith_admin", "-B node_fake -t 5") test.add_stonith_log_pattern("perform op 'node_fake reboot' with 'false1'") test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true1'") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true2'") test.add_stonith_log_pattern("Remapped off of node_fake complete, remapping to on") test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) for node_fake") test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) for node_fake") test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") test = self.new_test("cpg_remap_complex_2", "Verify remapped topology reboot failure in second level proceeds to third level", 1) test.add_cmd("stonith_admin", """-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """-R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """-R true3 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "-r node_fake -i 1 -v false1") test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true1 -v false2 -v true3") test.add_cmd("stonith_admin", "-r node_fake -i 3 -v true2") test.add_cmd("stonith_admin", "-B node_fake -t 5") test.add_stonith_log_pattern("perform op 'node_fake reboot' with 'false1'") test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") test.add_stonith_log_pattern("perform op 'node_fake off' with 'true1'") test.add_stonith_log_pattern("perform op 'node_fake off' with 'false2'") test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (off) the maximum number of times") test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") test.add_stonith_log_pattern("perform op 'node_fake reboot' with 'true2'") test.add_stonith_neg_log_pattern("node_fake with true3") def setup_environment(self, use_corosync): """ Prepare the host before executing any tests """ if self.autogen_corosync_cfg and use_corosync: corosync_conf = (""" totem { version: 2 crypto_cipher: none crypto_hash: none nodeid: 101 secauth: off interface { ttl: 1 ringnumber: 0 mcastport: 6666 mcastaddr: 226.94.1.1 bindnetaddr: 127.0.0.1 } } logging { debug: off fileline: off to_syslog: no to_stderr: no syslog_facility: daemon timestamp: on to_logfile: yes logfile: @CRM_LOG_DIR@/corosync.test.log logfile_priority: info } """) os.system("cat <<-END >>/etc/corosync/corosync.conf\n%s\nEND" % (corosync_conf)) if use_corosync: ### make sure we are in control ### killall("corosync") self.start_corosync() os.system("cp %s /usr/sbin/fence_dummy" % FENCE_DUMMY) # modifies dummy agent to do require unfencing os.system("sed 's/on_target=/automatic=/g' %s > /usr/sbin/fence_dummy_auto_unfence" % FENCE_DUMMY) os.system("chmod 711 /usr/sbin/fence_dummy_auto_unfence") # modifies dummy agent to not advertise reboot os.system("sed 's/^.*.*//g' %s > /usr/sbin/fence_dummy_no_reboot" % FENCE_DUMMY) os.system("chmod 711 
/usr/sbin/fence_dummy_no_reboot") def cleanup_environment(self, use_corosync): """ Clean up the host after executing desired tests """ if use_corosync: killall("corosync") if self.verbose and os.path.exists('@CRM_LOG_DIR@/corosync.test.log'): print("Corosync output") logfile = io.open('@CRM_LOG_DIR@/corosync.test.log', 'rt') for line in logfile.readlines(): print(line.strip()) os.remove('@CRM_LOG_DIR@/corosync.test.log') if self.autogen_corosync_cfg: os.system("rm -f /etc/corosync/corosync.conf") os.system("rm -f /usr/sbin/fence_dummy") os.system("rm -f /usr/sbin/fence_dummy_auto_unfence") os.system("rm -f /usr/sbin/fence_dummy_no_reboot") class TestOptions(object): """ Option handler """ def __init__(self): self.options = {} self.options['list-tests'] = 0 self.options['run-all'] = 1 self.options['run-only'] = "" self.options['run-only-pattern'] = "" self.options['verbose'] = 0 self.options['invalid-arg'] = "" self.options['cpg-only'] = 0 self.options['no-cpg'] = 0 self.options['show-usage'] = 0 def build_options(self, argv): """ Set options based on command-line arguments """ args = argv[1:] skip = 0 for i in range(0, len(args)): if skip: skip = 0 continue elif args[i] == "-h" or args[i] == "--help": self.options['show-usage'] = 1 elif args[i] == "-l" or args[i] == "--list-tests": self.options['list-tests'] = 1 elif args[i] == "-V" or args[i] == "--verbose": self.options['verbose'] = 1 elif args[i] == "-n" or args[i] == "--no-cpg": self.options['no-cpg'] = 1 elif args[i] == "-c" or args[i] == "--cpg-only": self.options['cpg-only'] = 1 elif args[i] == "-r" or args[i] == "--run-only": self.options['run-only'] = args[i+1] skip = 1 elif args[i] == "-p" or args[i] == "--run-only-pattern": self.options['run-only-pattern'] = args[i+1] skip = 1 def show_usage(self): """ Show command usage """ print("usage: " + sys.argv[0] + " [options]") print("If no options are provided, all tests will run") print("Options:") print("\t [--help | -h] Show usage") print("\t [--list-tests | -l] Print out all registered tests.") print("\t [--cpg-only | -c] Only run tests that require corosync.") print("\t [--no-cpg | -n] Only run tests that do not require corosync") print("\t [--run-only | -r 'testname'] Run a specific test") print("\t [--verbose | -V] Verbose output") print("\t [--run-only-pattern | -p 'string'] Run only tests containing the string value") print("\n\tExample: Run only the test 'start_top'") print("\t\t python ./regression.py --run-only start_stop") print("\n\tExample: Run only the tests with the string 'systemd' present in them") print("\t\t python ./regression.py --run-only-pattern systemd") def main(argv): """ Run fencing regression tests as specified by arguments """ opts = TestOptions() opts.build_options(argv) use_corosync = 1 tests = Tests(opts.options['verbose']) tests.build_standalone_tests() tests.build_custom_timeout_tests() tests.build_api_sanity_tests() tests.build_fence_merge_tests() tests.build_fence_no_merge_tests() tests.build_unfence_tests() tests.build_unfence_on_target_tests() tests.build_nodeid_tests() tests.build_remap_tests() if opts.options['list-tests']: tests.print_list() sys.exit(CrmExit.OK) elif opts.options['show-usage']: opts.show_usage() sys.exit(CrmExit.OK) print("Starting ...") if opts.options['no-cpg']: use_corosync = 0 tests.setup_environment(use_corosync) if opts.options['run-only-pattern'] != "": tests.run_tests_matching(opts.options['run-only-pattern']) tests.print_results() elif opts.options['run-only'] != "": tests.run_single(opts.options['run-only']) 
tests.print_results() elif opts.options['no-cpg']: tests.run_no_cpg() tests.print_results() elif opts.options['cpg-only']: tests.run_cpg_only() tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_environment(use_corosync) tests.exit() if __name__ == "__main__": main(sys.argv) diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c index 87d48f1a64..169f43b45d 100644 --- a/lib/services/services_linux.c +++ b/lib/services/services_linux.c @@ -1,927 +1,938 @@ /* * Copyright (C) 2010-2016 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include +#include #include #include #include #ifdef HAVE_SYS_SIGNALFD_H #include #endif #include "crm/crm.h" #include "crm/common/mainloop.h" #include "crm/services.h" #include "services_private.h" #if SUPPORT_CIBSECRETS # include "crm/common/cib_secrets.h" #endif static gboolean svc_read_output(int fd, svc_action_t * op, bool is_stderr) { char *data = NULL; int rc = 0, len = 0; char buf[500]; static const size_t buf_read_len = sizeof(buf) - 1; if (fd < 0) { crm_trace("No fd for %s", op->id); return FALSE; } if (is_stderr && op->stderr_data) { len = strlen(op->stderr_data); data = op->stderr_data; crm_trace("Reading %s stderr into offset %d", op->id, len); } else if (is_stderr == FALSE && op->stdout_data) { len = strlen(op->stdout_data); data = op->stdout_data; crm_trace("Reading %s stdout into offset %d", op->id, len); } else { crm_trace("Reading %s %s into offset %d", op->id, is_stderr?"stderr":"stdout", len); } do { rc = read(fd, buf, buf_read_len); if (rc > 0) { crm_trace("Got %d chars: %.80s", rc, buf); buf[rc] = 0; data = realloc_safe(data, len + rc + 1); len += sprintf(data + len, "%s", buf); } else if (errno != EINTR) { /* error or EOF * Cleanup happens in pipe_done() */ rc = FALSE; break; } } while (rc == buf_read_len || rc < 0); if (is_stderr) { op->stderr_data = data; } else { op->stdout_data = data; } return rc; } static int dispatch_stdout(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return svc_read_output(op->opaque->stdout_fd, op, FALSE); } static int dispatch_stderr(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return svc_read_output(op->opaque->stderr_fd, op, TRUE); } static void pipe_out_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; crm_trace("%p", op); op->opaque->stdout_gsource = NULL; if (op->opaque->stdout_fd > STDOUT_FILENO) { close(op->opaque->stdout_fd); } op->opaque->stdout_fd = -1; } static void pipe_err_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; op->opaque->stderr_gsource = NULL; if (op->opaque->stderr_fd > STDERR_FILENO) { close(op->opaque->stderr_fd); } op->opaque->stderr_fd = -1; } static struct mainloop_fd_callbacks stdout_callbacks = { .dispatch = dispatch_stdout, .destroy = pipe_out_done, }; static struct mainloop_fd_callbacks stderr_callbacks = { .dispatch = dispatch_stderr, .destroy = pipe_err_done, }; static void set_ocf_env(const char *key, const char *value, gpointer user_data) { if (setenv(key, value, 1) != 0) { crm_perror(LOG_ERR, "setenv failed for key:%s and value:%s", key, value); } } static void set_ocf_env_with_prefix(gpointer key, gpointer value, gpointer user_data) { char buffer[500]; snprintf(buffer, sizeof(buffer), "OCF_RESKEY_%s", (char *)key); 
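    /* For example, a resource parameter "ip" is exported to the agent's
     * environment as "OCF_RESKEY_ip"; per the OCF standard, this is how
     * agents receive their instance parameters. Note that snprintf() above
     * silently truncates any key too long for the fixed-size buffer.
     */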
set_ocf_env(buffer, value, user_data); } /*! * \internal * \brief Add environment variables suitable for an action * * \param[in] op Action to use */ static void add_action_env_vars(const svc_action_t *op) { if (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_OCF) == FALSE) { return; } if (op->params) { g_hash_table_foreach(op->params, set_ocf_env_with_prefix, NULL); } set_ocf_env("OCF_RA_VERSION_MAJOR", "1", NULL); set_ocf_env("OCF_RA_VERSION_MINOR", "0", NULL); set_ocf_env("OCF_ROOT", OCF_ROOT_DIR, NULL); set_ocf_env("OCF_EXIT_REASON_PREFIX", PCMK_OCF_REASON_PREFIX, NULL); if (op->rsc) { set_ocf_env("OCF_RESOURCE_INSTANCE", op->rsc, NULL); } if (op->agent != NULL) { set_ocf_env("OCF_RESOURCE_TYPE", op->agent, NULL); } /* Notes: this is not added to specification yet. Sept 10,2004 */ if (op->provider != NULL) { set_ocf_env("OCF_RESOURCE_PROVIDER", op->provider, NULL); } } gboolean recurring_action_timer(gpointer data) { svc_action_t *op = data; crm_debug("Scheduling another invocation of %s", op->id); /* Clean out the old result */ free(op->stdout_data); op->stdout_data = NULL; free(op->stderr_data); op->stderr_data = NULL; op->opaque->repeat_timer = 0; services_action_async(op, NULL); return FALSE; } /* Returns FALSE if 'op' should be free'd by the caller */ gboolean operation_finalize(svc_action_t * op) { int recurring = 0; if (op->interval) { if (op->cancel) { op->status = PCMK_LRM_OP_CANCELLED; cancel_recurring_action(op); } else { recurring = 1; op->opaque->repeat_timer = g_timeout_add(op->interval, recurring_action_timer, (void *)op); } } if (op->opaque->callback) { op->opaque->callback(op); } op->pid = 0; services_untrack_op(op); if (!recurring && op->synchronous == FALSE) { /* * If this is a recurring action, do not free explicitly. * It will get freed whenever the action gets cancelled. */ services_action_free(op); return TRUE; } services_action_cleanup(op); return FALSE; } static void operation_finished(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { svc_action_t *op = mainloop_child_userdata(p); char *prefix = crm_strdup_printf("%s:%d", op->id, op->pid); mainloop_clear_child_userdata(p); op->status = PCMK_LRM_OP_DONE; CRM_ASSERT(op->pid == pid); crm_trace("%s %p %p", prefix, op->opaque->stderr_gsource, op->opaque->stdout_gsource); if (op->opaque->stderr_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ crm_trace("%s dispatching stderr", prefix); dispatch_stderr(op); crm_trace("%s: %p", op->id, op->stderr_data); mainloop_del_fd(op->opaque->stderr_gsource); op->opaque->stderr_gsource = NULL; } if (op->opaque->stdout_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ crm_trace("%s dispatching stdout", prefix); dispatch_stdout(op); crm_trace("%s: %p", op->id, op->stdout_data); mainloop_del_fd(op->opaque->stdout_gsource); op->opaque->stdout_gsource = NULL; } if (signo) { if (mainloop_child_timeout(p)) { crm_warn("%s - timed out after %dms", prefix, op->timeout); op->status = PCMK_LRM_OP_TIMEOUT; op->rc = PCMK_OCF_TIMEOUT; } else { do_crm_log_unlikely((op->cancel) ? 
LOG_INFO : LOG_WARNING, "%s - terminated with signal %d", prefix, signo); op->status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_SIGNAL; } } else { op->rc = exitcode; crm_debug("%s - exited with rc=%d", prefix, exitcode); } free(prefix); prefix = crm_strdup_printf("%s:%d:stderr", op->id, op->pid); crm_log_output(LOG_NOTICE, prefix, op->stderr_data); free(prefix); prefix = crm_strdup_printf("%s:%d:stdout", op->id, op->pid); crm_log_output(LOG_DEBUG, prefix, op->stdout_data); free(prefix); operation_finalize(op); } /*! * \internal * \brief Set operation rc and status per errno from stat(), fork() or execvp() * * \param[in,out] op Operation to set rc and status for * \param[in] error Value of errno after system call * * \return void */ static void services_handle_exec_error(svc_action_t * op, int error) { int rc_not_installed, rc_insufficient_priv, rc_exec_error; /* Mimic the return codes for each standard as that's what we'll convert back from in get_uniform_rc() */ if (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB) && safe_str_eq(op->action, "status")) { rc_not_installed = PCMK_LSB_STATUS_NOT_INSTALLED; rc_insufficient_priv = PCMK_LSB_STATUS_INSUFFICIENT_PRIV; rc_exec_error = PCMK_LSB_STATUS_UNKNOWN; #if SUPPORT_NAGIOS } else if (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_NAGIOS)) { rc_not_installed = NAGIOS_NOT_INSTALLED; rc_insufficient_priv = NAGIOS_INSUFFICIENT_PRIV; rc_exec_error = PCMK_OCF_EXEC_ERROR; #endif } else { rc_not_installed = PCMK_OCF_NOT_INSTALLED; rc_insufficient_priv = PCMK_OCF_INSUFFICIENT_PRIV; rc_exec_error = PCMK_OCF_EXEC_ERROR; } switch (error) { /* see execve(2), stat(2) and fork(2) */ case ENOENT: /* No such file or directory */ case EISDIR: /* Is a directory */ case ENOTDIR: /* Path component is not a directory */ case EINVAL: /* Invalid executable format */ case ENOEXEC: /* Invalid executable format */ op->rc = rc_not_installed; op->status = PCMK_LRM_OP_NOT_INSTALLED; break; case EACCES: /* permission denied (various errors) */ case EPERM: /* permission denied (various errors) */ op->rc = rc_insufficient_priv; op->status = PCMK_LRM_OP_ERROR; break; default: op->rc = rc_exec_error; op->status = PCMK_LRM_OP_ERROR; } } static void action_launch_child(svc_action_t *op) { int lpc; /* SIGPIPE is ignored (which is different from signal blocking) by the gnutls library. * Depending on the libqb version in use, libqb may set SIGPIPE to be ignored as well. * We do not want this to be inherited by the child process. By resetting this the signal * to the default behavior, we avoid some potential odd problems that occur during OCF * scripts when SIGPIPE is ignored by the environment. */ signal(SIGPIPE, SIG_DFL); #if defined(HAVE_SCHED_SETSCHEDULER) if (sched_getscheduler(0) != SCHED_OTHER) { struct sched_param sp; memset(&sp, 0, sizeof(sp)); sp.sched_priority = 0; if (sched_setscheduler(0, SCHED_OTHER, &sp) == -1) { crm_perror(LOG_ERR, "Could not reset scheduling policy to SCHED_OTHER for %s", op->id); } } #endif if (setpriority(PRIO_PROCESS, 0, 0) == -1) { crm_perror(LOG_ERR, "Could not reset process priority to 0 for %s", op->id); } /* Man: The call setpgrp() is equivalent to setpgid(0,0) * _and_ compiles on BSD variants too * need to investigate if it works the same too. */ setpgid(0, 0); // Close all file descriptors except stdin/stdout/stderr for (lpc = getdtablesize() - 1; lpc > STDERR_FILENO; lpc--) { close(lpc); } #if SUPPORT_CIBSECRETS if (replace_secret_params(op->rsc, op->params) < 0) { /* replacing secrets failed! 
*/ if (safe_str_eq(op->action,"stop")) { /* don't fail on stop! */ crm_info("proceeding with the stop operation for %s", op->rsc); } else { crm_err("failed to get secrets for %s, " "considering resource not configured", op->rsc); _exit(PCMK_OCF_NOT_CONFIGURED); } } #endif add_action_env_vars(op); /* Become the desired user */ if (op->opaque->uid && (geteuid() == 0)) { + + // If requested, set effective group if (op->opaque->gid && (setgid(op->opaque->gid) < 0)) { - crm_perror(LOG_ERR, "setting group to %d", op->opaque->gid); + crm_perror(LOG_ERR, "Could not set child group to %d", op->opaque->gid); _exit(PCMK_OCF_NOT_CONFIGURED); } + + // Erase supplementary group list + // (We could do initgroups() if we kept a copy of the username) + if (setgroups(0, NULL) < 0) { + crm_perror(LOG_ERR, "Could not set child groups"); + _exit(PCMK_OCF_NOT_CONFIGURED); + } + + // Set effective user if (setuid(op->opaque->uid) < 0) { crm_perror(LOG_ERR, "setting user to %d", op->opaque->uid); _exit(PCMK_OCF_NOT_CONFIGURED); } - /* We could do initgroups() here if we kept a copy of the username */ } /* execute the RA */ execvp(op->opaque->exec, op->opaque->args); /* Most cases should have been already handled by stat() */ services_handle_exec_error(op, errno); _exit(op->rc); } #ifndef HAVE_SYS_SIGNALFD_H static int sigchld_pipe[2] = { -1, -1 }; static void sigchld_handler() { if ((sigchld_pipe[1] >= 0) && (write(sigchld_pipe[1], "", 1) == -1)) { crm_perror(LOG_TRACE, "Could not poke SIGCHLD self-pipe"); } } #endif static void action_synced_wait(svc_action_t * op, sigset_t *mask) { int status = 0; int timeout = op->timeout; int sfd = -1; time_t start = -1; struct pollfd fds[3]; int wait_rc = 0; #ifdef HAVE_SYS_SIGNALFD_H sfd = signalfd(-1, mask, SFD_NONBLOCK); if (sfd < 0) { crm_perror(LOG_ERR, "signalfd() failed"); } #else sfd = sigchld_pipe[0]; #endif fds[0].fd = op->opaque->stdout_fd; fds[0].events = POLLIN; fds[0].revents = 0; fds[1].fd = op->opaque->stderr_fd; fds[1].events = POLLIN; fds[1].revents = 0; fds[2].fd = sfd; fds[2].events = POLLIN; fds[2].revents = 0; crm_trace("Waiting for %d", op->pid); start = time(NULL); do { int poll_rc = poll(fds, 3, timeout); if (poll_rc > 0) { if (fds[0].revents & POLLIN) { svc_read_output(op->opaque->stdout_fd, op, FALSE); } if (fds[1].revents & POLLIN) { svc_read_output(op->opaque->stderr_fd, op, TRUE); } if (fds[2].revents & POLLIN) { #ifdef HAVE_SYS_SIGNALFD_H struct signalfd_siginfo fdsi; ssize_t s; s = read(sfd, &fdsi, sizeof(struct signalfd_siginfo)); if (s != sizeof(struct signalfd_siginfo)) { crm_perror(LOG_ERR, "Read from signal fd %d failed", sfd); } else if (fdsi.ssi_signo == SIGCHLD) { #else if (1) { /* Clear out the sigchld pipe. */ char ch; while (read(sfd, &ch, 1) == 1) /*omit*/; #endif wait_rc = waitpid(op->pid, &status, WNOHANG); if (wait_rc > 0) { break; } else if (wait_rc < 0){ if (errno == ECHILD) { /* Here, don't dare to kill and bail out... */ break; } else { /* ...otherwise pretend process still runs. 
*/ wait_rc = 0; } crm_perror(LOG_ERR, "waitpid() for %d failed", op->pid); } } } } else if (poll_rc == 0) { timeout = 0; break; } else if (poll_rc < 0) { if (errno != EINTR) { crm_perror(LOG_ERR, "poll() failed"); break; } } timeout = op->timeout - (time(NULL) - start) * 1000; } while ((op->timeout < 0 || timeout > 0)); crm_trace("Child done: %d", op->pid); if (wait_rc <= 0) { op->rc = PCMK_OCF_UNKNOWN_ERROR; if (op->timeout > 0 && timeout <= 0) { op->status = PCMK_LRM_OP_TIMEOUT; crm_warn("%s:%d - timed out after %dms", op->id, op->pid, op->timeout); } else { op->status = PCMK_LRM_OP_ERROR; } /* If only child hasn't been successfully waited for, yet. This is to limit killing wrong target a bit more. */ if (wait_rc == 0 && waitpid(op->pid, &status, WNOHANG) == 0) { if (kill(op->pid, SIGKILL)) { crm_err("kill(%d, KILL) failed: %d", op->pid, errno); } /* Safe to skip WNOHANG here as we sent non-ignorable signal. */ while (waitpid(op->pid, &status, 0) == (pid_t) -1 && errno == EINTR) /*omit*/; } } else if (WIFEXITED(status)) { op->status = PCMK_LRM_OP_DONE; op->rc = WEXITSTATUS(status); crm_info("Managed %s process %d exited with rc=%d", op->id, op->pid, op->rc); } else if (WIFSIGNALED(status)) { int signo = WTERMSIG(status); op->status = PCMK_LRM_OP_ERROR; crm_err("Managed %s process %d exited with signal=%d", op->id, op->pid, signo); } #ifdef WCOREDUMP if (WCOREDUMP(status)) { crm_err("Managed %s process %d dumped core", op->id, op->pid); } #endif svc_read_output(op->opaque->stdout_fd, op, FALSE); svc_read_output(op->opaque->stderr_fd, op, TRUE); close(op->opaque->stdout_fd); close(op->opaque->stderr_fd); #ifdef HAVE_SYS_SIGNALFD_H close(sfd); #endif } /* For an asynchronous 'op', returns FALSE if 'op' should be free'd by the caller */ /* For a synchronous 'op', returns FALSE if 'op' fails */ gboolean services_os_action_execute(svc_action_t * op) { int stdout_fd[2]; int stderr_fd[2]; int rc; struct stat st; sigset_t *pmask; #ifdef HAVE_SYS_SIGNALFD_H sigset_t mask; sigset_t old_mask; #define sigchld_cleanup() do { \ if (sigismember(&old_mask, SIGCHLD) == 0) { \ if (sigprocmask(SIG_UNBLOCK, &mask, NULL) < 0) { \ crm_perror(LOG_ERR, "sigprocmask() failed to unblock sigchld"); \ } \ } \ } while (0) #else struct sigaction sa; struct sigaction old_sa; #define sigchld_cleanup() do { \ if (sigaction(SIGCHLD, &old_sa, NULL) < 0) { \ crm_perror(LOG_ERR, "sigaction() failed to remove sigchld handler"); \ } \ close(sigchld_pipe[0]); \ close(sigchld_pipe[1]); \ sigchld_pipe[0] = sigchld_pipe[1] = -1; \ } while(0) #endif /* Fail fast */ if(stat(op->opaque->exec, &st) != 0) { rc = errno; crm_warn("Cannot execute '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } return FALSE; } if (pipe(stdout_fd) < 0) { rc = errno; crm_err("pipe(stdout_fd) failed. '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } return FALSE; } if (pipe(stderr_fd) < 0) { rc = errno; close(stdout_fd[0]); close(stdout_fd[1]); crm_err("pipe(stderr_fd) failed. 
'%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } return FALSE; } if (op->synchronous) { #ifdef HAVE_SYS_SIGNALFD_H sigemptyset(&mask); sigaddset(&mask, SIGCHLD); sigemptyset(&old_mask); if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { crm_perror(LOG_ERR, "sigprocmask() failed to block sigchld"); } pmask = &mask; #else if(pipe(sigchld_pipe) == -1) { crm_perror(LOG_ERR, "pipe() failed"); } rc = crm_set_nonblocking(sigchld_pipe[0]); if (rc < 0) { crm_warn("Could not set pipe input non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } rc = crm_set_nonblocking(sigchld_pipe[1]); if (rc < 0) { crm_warn("Could not set pipe output non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } sa.sa_handler = sigchld_handler; sa.sa_flags = 0; sigemptyset(&sa.sa_mask); if (sigaction(SIGCHLD, &sa, &old_sa) < 0) { crm_perror(LOG_ERR, "sigaction() failed to set sigchld handler"); } pmask = NULL; #endif } op->pid = fork(); switch (op->pid) { case -1: rc = errno; close(stdout_fd[0]); close(stdout_fd[1]); close(stderr_fd[0]); close(stderr_fd[1]); crm_err("Could not execute '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } sigchld_cleanup(); return FALSE; case 0: /* Child */ close(stdout_fd[0]); close(stderr_fd[0]); if (STDOUT_FILENO != stdout_fd[1]) { if (dup2(stdout_fd[1], STDOUT_FILENO) != STDOUT_FILENO) { crm_err("dup2() failed (stdout)"); } close(stdout_fd[1]); } if (STDERR_FILENO != stderr_fd[1]) { if (dup2(stderr_fd[1], STDERR_FILENO) != STDERR_FILENO) { crm_err("dup2() failed (stderr)"); } close(stderr_fd[1]); } if (op->synchronous) { sigchld_cleanup(); } action_launch_child(op); CRM_ASSERT(0); /* action_launch_child is effectively noreturn */ } /* Only the parent reaches here */ close(stdout_fd[1]); close(stderr_fd[1]); op->opaque->stdout_fd = stdout_fd[0]; rc = crm_set_nonblocking(op->opaque->stdout_fd); if (rc < 0) { crm_warn("Could not set child output non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } op->opaque->stderr_fd = stderr_fd[0]; rc = crm_set_nonblocking(op->opaque->stderr_fd); if (rc < 0) { crm_warn("Could not set child error output non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } if (op->synchronous) { action_synced_wait(op, pmask); sigchld_cleanup(); } else { crm_trace("Async waiting for %d - %s", op->pid, op->opaque->exec); mainloop_child_add_with_flags(op->pid, op->timeout, op->id, op, (op->flags & SVC_ACTION_LEAVE_GROUP) ? mainloop_leave_pid_group : 0, operation_finished); op->opaque->stdout_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stdout_fd, op, &stdout_callbacks); op->opaque->stderr_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stderr_fd, op, &stderr_callbacks); services_add_inflight_op(op); } return TRUE; } GList * services_os_get_directory_list(const char *root, gboolean files, gboolean executable) { GList *list = NULL; struct dirent **namelist; int entries = 0, lpc = 0; char buffer[PATH_MAX]; entries = scandir(root, &namelist, NULL, alphasort); if (entries <= 0) { return list; } for (lpc = 0; lpc < entries; lpc++) { struct stat sb; if ('.' 
== namelist[lpc]->d_name[0]) { free(namelist[lpc]); continue; } snprintf(buffer, sizeof(buffer), "%s/%s", root, namelist[lpc]->d_name); if (stat(buffer, &sb)) { continue; } if (S_ISDIR(sb.st_mode)) { if (files) { free(namelist[lpc]); continue; } } else if (S_ISREG(sb.st_mode)) { if (files == FALSE) { free(namelist[lpc]); continue; } else if (executable && (sb.st_mode & S_IXUSR) == 0 && (sb.st_mode & S_IXGRP) == 0 && (sb.st_mode & S_IXOTH) == 0) { free(namelist[lpc]); continue; } } list = g_list_append(list, strdup(namelist[lpc]->d_name)); free(namelist[lpc]); } free(namelist); return list; } GList * resources_os_list_lsb_agents(void) { return get_directory_list(LSB_ROOT_DIR, TRUE, TRUE); } GList * resources_os_list_ocf_providers(void) { return get_directory_list(OCF_ROOT_DIR "/resource.d", FALSE, TRUE); } GList * resources_os_list_ocf_agents(const char *provider) { GList *gIter = NULL; GList *result = NULL; GList *providers = NULL; if (provider) { char buffer[500]; snprintf(buffer, sizeof(buffer), "%s/resource.d/%s", OCF_ROOT_DIR, provider); return get_directory_list(buffer, TRUE, TRUE); } providers = resources_os_list_ocf_providers(); for (gIter = providers; gIter != NULL; gIter = gIter->next) { GList *tmp1 = result; GList *tmp2 = resources_os_list_ocf_agents(gIter->data); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } g_list_free_full(providers, free); return result; } #if SUPPORT_NAGIOS GList * resources_os_list_nagios_agents(void) { GList *plugin_list = NULL; GList *result = NULL; GList *gIter = NULL; plugin_list = get_directory_list(NAGIOS_PLUGIN_DIR, TRUE, TRUE); /* Make sure both the plugin and its metadata exist */ for (gIter = plugin_list; gIter != NULL; gIter = gIter->next) { const char *plugin = gIter->data; char *metadata = crm_strdup_printf(NAGIOS_METADATA_DIR "/%s.xml", plugin); struct stat st; if (stat(metadata, &st) == 0) { result = g_list_append(result, strdup(plugin)); } free(metadata); } g_list_free_full(plugin_list, free); return result; } #endif diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am index 7bfc3e0e86..a2aff315f6 100644 --- a/lrmd/Makefile.am +++ b/lrmd/Makefile.am @@ -1,63 +1,63 @@ # Copyright (c) 2012 David Vossel # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # include $(top_srcdir)/Makefile.common lrmdlibdir = $(CRM_DAEMON_DIR) lrmdlib_PROGRAMS = lrmd # Test components lrmdlib_PROGRAMS += lrmd_test testdir = $(datadir)/$(PACKAGE)/tests/lrmd test_SCRIPTS = regression.py initdir = $(INITDIR) init_SCRIPTS = pacemaker_remote sbin_PROGRAMS = pacemaker_remoted if BUILD_SYSTEMD systemdunit_DATA = pacemaker_remote.service endif lrmd_CFLAGS = $(CFLAGS_HARDENED_EXE) lrmd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) lrmd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/fencing/libstonithd.la ${COMPAT_LIBS} lrmd_SOURCES = main.c lrmd.c lrmd_alert_api.c pacemaker_remoted_CPPFLAGS = -DSUPPORT_REMOTE $(AM_CPPFLAGS) pacemaker_remoted_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_remoted_LDADD = $(lrmd_LDADD) \ $(top_builddir)/lib/lrmd/liblrmd.la pacemaker_remoted_SOURCES = $(lrmd_SOURCES) tls_backend.c ipc_proxy.c lrmd_test_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/lrmd/liblrmd.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/pengine/libpengine.la lrmd_test_SOURCES = test.c noinst_HEADERS = lrmd_private.h -CLEANFILES = $(man8_MANS) +CLEANFILES = regression.py $(man8_MANS) diff --git a/lrmd/regression.py.in b/lrmd/regression.py.in index 6e365534fc..e1d5d343ee 100755 --- a/lrmd/regression.py.in +++ b/lrmd/regression.py.in @@ -1,1273 +1,1273 @@ -#!/usr/bin/python +#!@PYTHON@ """ Regression tests for Pacemaker's lrmd """ # Pacemaker targets compatibility with Python 2.7 and 3.2+ from __future__ import print_function, unicode_literals, absolute_import, division __copyright__ = "Copyright (C) 2012-2018 Andrew Beekhof " __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import sys import subprocess import shlex import time # Where to find test binaries # Prefer the source tree if available BUILD_DIR = "@abs_top_builddir@" TEST_DIR = sys.path[0] # These values must be kept in sync with include/crm/crm.h class CrmExit: OK = 0 ERROR = 1 INVALID_PARAM = 2 UNIMPLEMENT_FEATURE = 3 INSUFFICIENT_PRIV = 4 NOT_INSTALLED = 5 NOT_CONFIGURED = 6 NOT_RUNNING = 7 USAGE = 64 DATAERR = 65 NOINPUT = 66 NOUSER = 67 NOHOST = 68 UNAVAILABLE = 69 SOFTWARE = 70 OSERR = 71 OSFILE = 72 CANTCREAT = 73 IOERR = 74 TEMPFAIL = 75 PROTOCOL = 76 NOPERM = 77 CONFIG = 78 FATAL = 100 PANIC = 101 DISCONNECT = 102 SOLO = 103 DIGEST = 104 NOSUCH = 105 QUORUM = 106 UNSAFE = 107 EXISTS = 108 MULTIPLE = 109 OLD = 110 TIMEOUT = 124 MAX = 255 def update_path(): """ Set the PATH environment variable appropriately for the tests """ new_path = os.environ['PATH'] if os.path.exists("%s/regression.py.in" % TEST_DIR): print("Running tests from the source tree: %s (%s)" % (BUILD_DIR, TEST_DIR)) new_path = "%s/lrmd:%s" % (BUILD_DIR, new_path) # For lrmd, lrmd_test and pacemaker_remoted new_path = "%s/tools:%s" % (BUILD_DIR, new_path) # For crm_resource new_path = "%s/fencing:%s" % (BUILD_DIR, new_path) # For stonithd else: print("Running tests from the install tree: @CRM_DAEMON_DIR@ (not %s)" % TEST_DIR) new_path = "@CRM_DAEMON_DIR@:%s" % (new_path) # For stonithd, lrmd, lrmd_test and pacemaker_remoted 
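    # In both branches the directories are prepended, so the binaries under
    # test shadow any copies of the same tools already installed on the host.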
    print('Using PATH="{}"'.format(new_path))
    os.environ['PATH'] = new_path

def pipe_output(pipes, stdout=True, stderr=False):
    """ Wrapper to get text output from pipes regardless of Python version """
    output = ""
    pipe_outputs = pipes.communicate()
    if sys.version_info < (3,):
        if stdout:
            output = output + pipe_outputs[0]
        if stderr:
            output = output + pipe_outputs[1]
    else:
        if stdout:
            output = output + pipe_outputs[0].decode(sys.stdout.encoding)
        if stderr:
            output = output + pipe_outputs[1].decode(sys.stderr.encoding)
    return output

def output_from_command(command):
    """ Run a command, and return its standard output. """
    test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    test.wait()
    return pipe_output(test).split("\n")

class TestError(Exception):
    """ Base class for exceptions in this module """
    pass

class ExitCodeError(TestError):
    """ Exception raised when command exit status is unexpected """

    def __init__(self, exit_code):
        self.exit_code = exit_code

    def __str__(self):
        return repr(self.exit_code)

class OutputNotFoundError(TestError):
    """ Exception raised when command output does not contain wanted string """

    # Take the offending output itself (not an exit code), so str() reports it
    def __init__(self, output):
        self.output = output

    def __str__(self):
        return repr(self.output)

class OutputFoundError(TestError):
    """ Exception raised when command output contains unwanted string """

    # As above, accept the output rather than an unused exit code
    def __init__(self, output):
        self.output = output

    def __str__(self):
        return repr(self.output)

class Test(object):
    """ Executor for a single lrmd regression test """

    def __init__(self, name, description, verbose=0, tls=0):
        self.name = name
        self.description = description
        self.cmds = []
        if tls:
            self.daemon_location = "pacemaker_remoted"
        else:
            self.daemon_location = "lrmd"
        self.test_tool_location = "lrmd_test"
        self.verbose = verbose
        self.tls = tls
        self.result_txt = ""
        self.cmd_tool_output = ""
        self.result_exitcode = CrmExit.OK
        self.lrmd_process = None
        self.stonith_process = None
        self.executed = 0

    def __new_cmd(self, cmd, args, exitcode, stdout_match="", no_wait=0, stdout_negative_match="", kill=None):
        """ Add a command to be executed as part of this test """
        if self.verbose and cmd == self.test_tool_location:
            args = args + " -V "
        if (cmd == self.test_tool_location) and self.tls:
            args = args + " -S "
        self.cmds.append(
            {
                "cmd" : cmd,
                "kill" : kill,
                "args" : args,
                "expected_exitcode" : exitcode,
                "stdout_match" : stdout_match,
                "stdout_negative_match" : stdout_negative_match,
                "no_wait" : no_wait,
                "cmd_output" : "",
            }
        )

    def start_environment(self):
        """ Prepare the host for running a test """
        ### make sure we are in full control here ###
        cmd = shlex.split("killall -q -9 stonithd lt-stonithd lrmd lt-lrmd lrmd_test lt-lrmd_test pacemaker_remoted")
        test = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        test.wait()
        additional_args = ""
        if self.tls == 0:
            self.stonith_process = subprocess.Popen(shlex.split("stonithd -s"))
        if self.verbose:
            additional_args = additional_args + " -V"
        self.lrmd_process = subprocess.Popen(shlex.split("%s %s -l /tmp/lrmd-regression.log" % (self.daemon_location, additional_args)))
        time.sleep(1)

    def clean_environment(self):
        """ Clean up the host after running a test """
        if self.lrmd_process:
            self.lrmd_process.terminate()
            self.lrmd_process.wait()
            if self.verbose:
                print("Daemon output")
                logfile = io.open('/tmp/lrmd-regression.log', 'rt', errors='replace')
                for line in logfile:
                    print(line.strip().encode('utf-8', 'replace'))
            os.remove('/tmp/lrmd-regression.log')
        if self.stonith_process:
            self.stonith_process.terminate()
            self.stonith_process.wait()
        self.lrmd_process = None
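        # Forget the terminated daemons; the next start_environment() call
        # spawns fresh processes rather than reusing stale handles.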
self.stonith_process = None def add_sys_cmd(self, cmd, args): """ Add a simple command to be executed as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, "") def add_cmd_check_stdout(self, args, match, no_match=""): """ Add a command with expected output to be executed as part of this test """ self.__new_cmd(self.test_tool_location, args, CrmExit.OK, match, 0, no_match) def add_cmd(self, args): """ Add an lrmd_test command to be executed as part of this test """ self.__new_cmd(self.test_tool_location, args, CrmExit.OK, "") def add_cmd_and_kill(self, kill_proc, args): """ Add an lrmd_test command and system command to be executed as part of this test """ self.__new_cmd(self.test_tool_location, args, CrmExit.OK, "", kill=kill_proc) def add_expected_fail_cmd(self, args, exitcode=CrmExit.ERROR): """ Add an lrmd_test command to be executed as part of this test and expected to fail """ self.__new_cmd(self.test_tool_location, args, exitcode, "") def get_exitcode(self): """ Return the exit status of the last test execution """ return self.result_exitcode def print_result(self, filler): """ Print the result of the last test execution """ print("%s%s" % (filler, self.result_txt)) def run_cmd(self, args): """ Execute a command as part of this test """ cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print("\n\nRunning: "+" ".join(cmd)) test = subprocess.Popen(cmd, stdout=subprocess.PIPE) if args['kill']: if self.verbose: print("Also running: "+args['kill']) ### Typically, the kill argument is used to detect some sort of ### failure. Without yielding for a few seconds here, the process ### launched earlier that is listening for the failure may not have ### time to connect to the lrmd. time.sleep(2) subprocess.Popen(shlex.split(args['kill'])) if args['no_wait'] == 0: test.wait() else: return CrmExit.OK output = pipe_output(test) args['cmd_output'] = output if test.returncode != args['expected_exitcode']: raise ExitCodeError(test.returncode) if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: raise OutputNotFoundError(output) if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: raise OutputFoundError(output) def set_error(self, step, cmd): """ Record failure of this test """ msg = "FAILURE - '%s' failed at step %d. Command: %s %s" self.result_txt = msg % (self.name, step, cmd['cmd'], cmd['args']) self.result_exitcode = CrmExit.ERROR def run(self): """ Execute this test. 
""" res = 0 i = 1 if self.tls and self.name.count("stonith") != 0: self.result_txt = "SKIPPED - '%s' - disabled when testing pacemaker_remote" % (self.name) print(self.result_txt) return res self.start_environment() if self.verbose: print("\n--- START TEST - %s" % self.name) self.result_txt = "SUCCESS - '%s'" % (self.name) self.result_exitcode = CrmExit.OK for cmd in self.cmds: try: self.run_cmd(cmd) except ExitCodeError as e: print(cmd['cmd_output']) print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode'])) self.set_error(i, cmd); break except OutputNotFoundError as e: print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e)) self.set_error(i, cmd); break except OutputFoundError as e: print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e)) self.set_error(i, cmd); break if self.verbose: print(cmd['cmd_output'].strip()) print("Step %d SUCCESS" % (i)) i = i + 1 self.clean_environment() print(self.result_txt) if self.verbose: print("--- END TEST - %s\n" % self.name) self.executed = 1 return res class Tests(object): """ Collection of all lrmd regression tests """ def __init__(self, verbose=0, tls=0): self.tests = [] self.verbose = verbose self.tls = tls self.rsc_classes = output_from_command("crm_resource --list-standards") self.rsc_classes = self.rsc_classes[:-1] # Strip trailing empty line self.need_authkey = 0 self.action_timeout = " -t 9000 " if self.tls: self.rsc_classes.remove("stonith") if "systemd" in self.rsc_classes: try: # This code doesn't need this import, but lrmd_dummy_daemon does, # so ensure the dependency is available rather than cause all # systemd tests to fail. import systemd.daemon except ImportError: print("Fatal error: python systemd bindings not found. 
Is package installed?", file=sys.stderr) sys.exit(CrmExit.ERROR) print("Testing resource classes", repr(self.rsc_classes)) self.common_cmds = { "ocf_reg_line" : "-c register_rsc -r ocf_test_rsc "+self.action_timeout+" -C ocf -P pacemaker -T Dummy", "ocf_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_unreg_line" : "-c unregister_rsc -r \"ocf_test_rsc\" "+self.action_timeout, "ocf_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_start_line" : "-c exec -r \"ocf_test_rsc\" -a \"start\" "+self.action_timeout, "ocf_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete\" ", "ocf_stop_line" : "-c exec -r \"ocf_test_rsc\" -a \"stop\" "+self.action_timeout, "ocf_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete\" ", "ocf_monitor_line" : "-c exec -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout, "ocf_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "ocf_cancel_line" : "-c cancel -r \"ocf_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ", "ocf_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "systemd_reg_line" : "-c register_rsc -r systemd_test_rsc "+self.action_timeout+" -C systemd -T lrmd_dummy_daemon", "systemd_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_unreg_line" : "-c unregister_rsc -r \"systemd_test_rsc\" "+self.action_timeout, "systemd_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_start_line" : "-c exec -r \"systemd_test_rsc\" -a \"start\" "+self.action_timeout, "systemd_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete\" ", "systemd_stop_line" : "-c exec -r \"systemd_test_rsc\" -a \"stop\" "+self.action_timeout, "systemd_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete\" ", "systemd_monitor_line" : "-c exec -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout, "systemd_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "systemd_cancel_line" : "-c cancel -r \"systemd_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ", "systemd_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "upstart_reg_line" : "-c register_rsc -r upstart_test_rsc "+self.action_timeout+" -C upstart -T lrmd_dummy_daemon", "upstart_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_unreg_line" : "-c unregister_rsc -r \"upstart_test_rsc\" "+self.action_timeout, "upstart_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_start_line" : "-c exec -r \"upstart_test_rsc\" -a \"start\" "+self.action_timeout, "upstart_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:start rc:ok op_status:complete\" ", "upstart_stop_line" : "-c exec 
-r \"upstart_test_rsc\" -a \"stop\" "+self.action_timeout, "upstart_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete\" ", "upstart_monitor_line" : "-c exec -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout, "upstart_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "upstart_cancel_line" : "-c cancel -r \"upstart_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ", "upstart_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "service_reg_line" : "-c register_rsc -r service_test_rsc "+self.action_timeout+" -C service -T LSBDummy", "service_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_unreg_line" : "-c unregister_rsc -r \"service_test_rsc\" "+self.action_timeout, "service_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_start_line" : "-c exec -r \"service_test_rsc\" -a \"start\" "+self.action_timeout, "service_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete\" ", "service_stop_line" : "-c exec -r \"service_test_rsc\" -a \"stop\" "+self.action_timeout, "service_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete\" ", "service_monitor_line" : "-c exec -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout, "service_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "service_cancel_line" : "-c cancel -r \"service_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ", "service_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "lsb_reg_line" : "-c register_rsc -r lsb_test_rsc "+self.action_timeout+" -C lsb -T LSBDummy", "lsb_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\" ", "lsb_unreg_line" : "-c unregister_rsc -r \"lsb_test_rsc\" "+self.action_timeout, "lsb_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\"", "lsb_start_line" : "-c exec -r \"lsb_test_rsc\" -a \"start\" "+self.action_timeout, "lsb_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete\" ", "lsb_stop_line" : "-c exec -r \"lsb_test_rsc\" -a \"stop\" "+self.action_timeout, "lsb_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete\" ", "lsb_monitor_line" : "-c exec -r \"lsb_test_rsc\" -a status -i \"2000\" "+self.action_timeout, "lsb_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete\" "+self.action_timeout, "lsb_cancel_line" : "-c cancel -r \"lsb_test_rsc\" -a \"status\" -i \"2000\" -t \"6000\" ", "lsb_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled\" ", "stonith_reg_line" : "-c register_rsc -r stonith_test_rsc "+self.action_timeout+" -C stonith -P pacemaker -T fence_dummy_monitor", "stonith_reg_event" : "-l \"NEW_EVENT event_type:register 
rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\" ", "stonith_unreg_line" : "-c unregister_rsc -r \"stonith_test_rsc\" "+self.action_timeout, "stonith_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\"", "stonith_start_line" : "-c exec -r \"stonith_test_rsc\" -a \"start\" -t 8000 ", "stonith_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete\" ", "stonith_stop_line" : "-c exec -r \"stonith_test_rsc\" -a \"stop\" "+self.action_timeout, "stonith_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete\" ", "stonith_monitor_line" : "-c exec -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" "+self.action_timeout, "stonith_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "stonith_cancel_line" : "-c cancel -r \"stonith_test_rsc\" -a \"monitor\" -i \"2000\" -t \"6000\" ", "stonith_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled\" ", } def new_test(self, name, description): """ Create a named test """ test = Test(name, description, self.verbose, self.tls) self.tests.append(test) return test def setup_test_environment(self): """ Prepare the host before executing any tests """ os.system("service pacemaker_remote stop") self.cleanup_test_environment() if self.tls and not os.path.isfile("/etc/pacemaker/authkey"): self.need_authkey = 1 os.system("mkdir -p /etc/pacemaker") os.system("dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1") ### Make fake systemd daemon and unit file ### - dummy_daemon = """#!/usr/bin/python + dummy_daemon = """#!@PYTHON@ import time, systemd.daemon time.sleep(3) systemd.daemon.notify("READY=1") while True: time.sleep(5) """ dummy_service_file = """ [Unit] Description=Dummy resource that takes a while to start [Service] Type=notify ExecStart=/usr/sbin/lrmd_dummy_daemon """ dummy_upstart_job = (""" description "Dummy service for regression tests" exec dd if=/dev/random of=/dev/null """) - dummy_fence_sleep_agent = ("""#!/usr/bin/python + dummy_fence_sleep_agent = ("""#!@PYTHON@ import sys import time def main(): for line in sys.stdin: if line.count("monitor") > 0: time.sleep(30000) sys.exit(0) sys.exit(1) if __name__ == "__main__": main() """) dummy_fence_agent = ("""#!/bin/sh while read line; do case ${line} in *monitor*) exit 0;; *metadata*) echo '' echo ' dummy description.' 
echo ' http://www.example.com' echo ' ' echo ' ' echo ' ' echo ' ' echo ' Fencing Action' echo ' ' echo ' ' echo ' ' echo ' ' echo ' Physical plug number or name of virtual machine' echo ' ' echo ' ' echo ' ' echo ' ' echo ' ' echo ' ' echo ' ' echo ' ' echo '' exit 0;; esac exit 1 done """) os.system("cat <<-END >>/etc/init/lrmd_dummy_daemon.conf\n%s\nEND" % (dummy_upstart_job)) os.system("cat <<-END >>/usr/sbin/lrmd_dummy_daemon\n%s\nEND" % (dummy_daemon)) os.system("cat <<-END >>/lib/systemd/system/lrmd_dummy_daemon.service\n%s\nEND" % (dummy_service_file)) os.system("chmod a+x /usr/sbin/lrmd_dummy_daemon") os.system("cat <<-END >>/usr/sbin/fence_dummy_sleep\n%s\nEND" % (dummy_fence_sleep_agent)) os.system("chmod 711 /usr/sbin/fence_dummy_sleep") f = io.open("/usr/sbin/fence_dummy_monitor", 'w') f.write(dummy_fence_agent) f.close() os.system("chmod 711 /usr/sbin/fence_dummy_monitor") if os.path.exists("%s/cts/LSBDummy" % BUILD_DIR): os.system("cp %s/cts/LSBDummy /etc/init.d/LSBDummy" % BUILD_DIR) if not os.path.exists("@OCF_RA_DIR@/pacemaker"): os.system("mkdir -p @OCF_RA_DIR@/pacemaker/") # Install helper OCF agents for agent in ["Dummy", "Stateful", "ping"]: os.system("cp %s/extra/resources/%s @OCF_RA_DIR@/pacemaker/%s" % (BUILD_DIR, agent, agent)) os.system("chmod a+x @OCF_RA_DIR@/pacemaker/%s" % (agent)) else: # Assume it's installed os.system("cp @datadir@/@PACKAGE@/tests/cts/LSBDummy /etc/init.d/LSBDummy") os.system("chmod a+x /etc/init.d/LSBDummy") os.system("mkdir -p @CRM_CORE_DIR@/root") if os.path.exists("/bin/systemctl"): os.system("systemctl daemon-reload") def cleanup_test_environment(self): """ Clean up the host after executing desired tests """ if self.need_authkey: os.system("rm -f /etc/pacemaker/authkey") os.system("rm -f /etc/init.d/LSBDummy") os.system("rm -f /lib/systemd/system/lrmd_dummy_daemon.service") os.system("rm -f /usr/sbin/lrmd_dummy_daemon") os.system("rm -f /usr/sbin/fence_dummy_monitor") os.system("rm -f /usr/sbin/fence_dummy_sleep") if os.path.exists("/bin/systemctl"): os.system("systemctl daemon-reload") def build_generic_tests(self): """ Register tests that apply to all resource classes """ common_cmds = self.common_cmds ### register/unregister tests ### for rsc in self.rsc_classes: test = self.new_test("generic_registration_%s" % (rsc), "Simple resource registration test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### start/stop tests ### for rsc in self.rsc_classes: test = self.new_test("generic_start_stop_%s" % (rsc), "Simple start and stop test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor cancel test ### for rsc in self.rsc_classes: test = self.new_test("generic_monitor_cancel_%s" % (rsc), "Simple monitor cancel test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + 
common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor duplicate test ### for rsc in self.rsc_classes: test = self.new_test("generic_monitor_duplicate_%s" % (rsc), "Test creation and canceling of duplicate monitors for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) # Add the duplicate monitors test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) # verify we still get update events ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) # cancel the monitor, if the duplicate merged with the original, we should no longer see monitor updates test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. 
###
            test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT)
            test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
            test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])

        ### stop implies cancel test ###
        for rsc in self.rsc_classes:
            test = self.new_test("generic_stop_implies_cancel_%s" % (rsc),
                                 "Verify stopping a resource implies cancel of recurring ops for %s standard" % (rsc))
            test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)])
            test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)])
            test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)])
            ### If this fails, that means the monitor may not be getting rescheduled ####
            test.add_cmd(common_cmds["%s_monitor_event" % (rsc)])
            ### If this fails, that means the monitor may not be getting rescheduled ####
            test.add_cmd(common_cmds["%s_monitor_event" % (rsc)])
            test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)])
            ### If this happens, the monitor did not actually cancel correctly. ###
            test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT)
            ### If this happens, the monitor did not actually cancel correctly. ###
            test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT)
            test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)])

    def build_multi_rsc_tests(self):
        """ Register complex tests that involve managing multiple resources of different types """
        common_cmds = self.common_cmds
        # Do not use service and systemd at the same time; they resolve to the same resource.
        ### register/start/monitor/stop/unregister resources of each type at the same time.
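Several tests above, and the multi-resource test below, repeat a bare monitor-event expectation to prove that the recurring monitor keeps getting rescheduled. A sketch of that idea with a hypothetical event source (get_event is assumed here, not part of the harness):

def expect_recurring_events(get_event, count):
    """Hypothetical helper: a healthy recurring monitor must keep
    producing events, so demand `count` successive ones; a missing
    event means the op was not rescheduled (or was cancelled)."""
    for _ in range(count):
        if not get_event():
            return False
    return True

events = iter([True, True, True])
print(expect_recurring_events(lambda: next(events, False), 3))  # True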
### test = self.new_test("multi_rsc_start_stop_all", "Start, monitor, and stop resources of multiple types and classes") for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) for rsc in self.rsc_classes: ### If this fails, that means the monitor is not being rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) def build_negative_tests(self): """ Register tests related to how the lrmd handles failures """ ### ocf start timeout test ### test = self.new_test("ocf_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" " + self.action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") # -t must be less than self.action_timeout test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -k \"op_sleep\" -v \"5\" -t 1000 -w") test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out" ' + self.action_timeout) test.add_cmd("-c exec -r test_rsc -a stop " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### stonith start timeout test ### test = self.new_test("stonith_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_sleep\" " + self.action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 -w") # -t must be less than self.action_timeout test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out" ' + self.action_timeout) test.add_cmd("-c exec -r test_rsc -a stop " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### stonith component fail ### common_cmds = self.common_cmds test = self.new_test("stonith_component_fail", "Kill stonith component after lrmd connects") test.add_cmd(common_cmds["stonith_reg_line"] + " " + common_cmds["stonith_reg_event"]) test.add_cmd(common_cmds["stonith_start_line"] + " " + common_cmds["stonith_start_event"]) test.add_cmd('-c exec -r "stonith_test_rsc" -a "monitor" -i "600000" -l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete" ' + self.action_timeout) 
test.add_cmd_and_kill("killall -9 -q stonithd lt-stonithd", '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:unknown error op_status:error" -t 15000') test.add_cmd(common_cmds["stonith_unreg_line"] + " " + common_cmds["stonith_unreg_event"]) ### monitor fail for ocf resources ### test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" " + self.action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r "test_rsc" -a "monitor" -i "100" ' + self.action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self.action_timeout) test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self.action_timeout) test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" -t 6000') test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" " + self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" " + self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### verify notify changes only for monitor operation. 
### test = self.new_test("monitor_changes_only", "Verify when flag is set, only monitor changes are notified.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+" -o " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r "test_rsc" -a "monitor" -i "100" ' + self.action_timeout + ' -o -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 6000") test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd('-c unregister_rsc -r "test_rsc" ' + self.action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete"') ### monitor fail for systemd resource ### if "systemd" in self.rsc_classes: test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported..") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T lrmd_dummy_daemon "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd_and_kill("killall -9 -q lrmd_dummy_daemon", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 8000") test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc 
action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### monitor fail for upstart resource ### if "upstart" in self.rsc_classes: test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported..") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T lrmd_dummy_daemon "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd_and_kill("killall -9 -q dd", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 8000") test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Cancel non-existent operation on a resource ### test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_expected_fail_cmd("-c cancel -r test_rsc -a \"monitor\" -i 1234 -t \"6000\" " ### interval is wrong, should fail "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running 
op_status:Cancelled\" ") test.add_expected_fail_cmd("-c cancel -r test_rsc -a stop -i 100 -t \"6000\" " ### action name is wrong, should fail "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Attempt to invoke non-existent rsc id ### test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.") test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r test_rsc -a stop "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r test_rsc -a monitor -i 6000 "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c cancel -r test_rsc -a start "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, systemd ### if "systemd" in self.rsc_classes: test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T this_is_fake1234 "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") if "upstart" in self.rsc_classes: test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T this_is_fake1234 "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, ocf ### test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pacemaker -T this_is_fake1234 "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not 
installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with non-existent provider ### test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pancakes -T Dummy "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with empty provider field ### test = self.new_test("start_ocf_no_provider", "Register ocf agent with a no provider, verify expected failure.") test.add_expected_fail_cmd("-c register_rsc -r \"test_rsc\" -C ocf -T Dummy "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") def build_stress_tests(self): """ Register stress tests """ timeout = "-t 20000" iterations = 25 test = self.new_test("ocf_stress", "Verify OCF agent handling works under load") for i in range(iterations): test.add_cmd("-c register_rsc -r rsc_%s %s -C ocf -P heartbeat -T Dummy -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a monitor %s -i 1000 -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete\"" % (i, timeout, i)) for i in range(iterations): test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) if "systemd" in self.rsc_classes: test = self.new_test("systemd_stress", "Verify systemd dbus connection works under load") for i in range(iterations): test.add_cmd("-c register_rsc -r rsc_%s %s -C systemd -T lrmd_dummy_daemon -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a monitor %s -i 1000 -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete\"" % (i, timeout, i)) for i in range(iterations): test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c unregister_rsc -r rsc_%s %s -l 
\"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) iterations = 9 timeout = "-t 30000" ### Verify recurring op in-flight collision is handled in series properly test = self.new_test("rsc_inflight_collision", "Verify recurring ops do not collide with other operations for the same rsc.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -a start %s -k op_sleep -v 1 -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\"" % (timeout)) for i in range(iterations): test.add_cmd("-c exec -r test_rsc -a monitor %s -i 100%d -k op_sleep -v 2 -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\"" % (timeout, i)) # test.add_sys_cmd("sleep", "-al @CRM_RSCTMP_DIR@") test.add_cmd("-c exec -r test_rsc -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\"" % (timeout)) test.add_cmd("-c unregister_rsc -r test_rsc %s -l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\"" % (timeout)) def build_custom_tests(self): """ Register tests that target specific cases """ ### verify resource temporary folder is created and used by OCF agents. ### test = self.new_test("rsc_tmp_dir", "Verify creation and use of rsc temporary state directory") test.add_sys_cmd("ls", "-al @CRM_RSCTMP_DIR@") test.add_cmd("-c register_rsc -r test_rsc -P heartbeat -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -a start -t 4000") test.add_sys_cmd("ls", "-al @CRM_RSCTMP_DIR@") test.add_sys_cmd("ls", "@CRM_RSCTMP_DIR@/Dummy-test_rsc.state") test.add_cmd("-c exec -r test_rsc -a stop -t 4000") test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay then stop test ### test = self.new_test("start_delay", "Verify start delay works as expected.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -s 6000 -a start -w -t 6000") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 2000", CrmExit.TIMEOUT) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 6000") test.add_cmd("-c exec -r test_rsc -a stop " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay, but cancel before it gets a chance to start. 
### test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -s 5000 -a start -w -t 4000") test.add_cmd("-c cancel -r test_rsc -a start " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 5000", CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register a bunch of resources, verify we can get info on them ### test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.") if "systemd" in self.rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C systemd -T lrmd_dummy_daemon "+self.action_timeout) test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ") if "upstart" in self.rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C upstart -T lrmd_dummy_daemon "+self.action_timeout) test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout) test.add_cmd("-c get_rsc_info -r rsc2 ") test.add_cmd("-c unregister_rsc -r rsc2 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ") ### Register duplicate, verify only one entry exists and can still be removed. test = self.new_test("duplicate_registration", "Register resource multiple times, verify only one entry exists and can be removed.") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Stateful -P pacemaker "+self.action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Stateful") test.add_cmd("-c unregister_rsc -r rsc2 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ") ### verify the option to only send notification to the original client. 
### test = self.new_test("notify_orig_client_only", "Verify option to only send notifications to the client originating the action.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" "+self.action_timeout+" -n " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") # this will fail because the monitor notifications should only go to the original caller, which no longer exists. test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"6000\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### get metadata ### test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Dummy\"", "resource-agent name=\"Dummy\"") test.add_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Stateful\"") test.add_expected_fail_cmd("-c metadata -P \"pacemaker\" -T \"Stateful\"") test.add_expected_fail_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"fake_agent\"") ### get metadata ### test = self.new_test("get_lsb_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"lsb\" -T \"LSBDummy\"", "resource-agent name='LSBDummy'") ### get stonith metadata ### test = self.new_test("get_stonith_metadata", "Retrieve stonith metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_monitor\"", "resource-agent name=\"fence_dummy_monitor\"") ### get metadata ### if "systemd" in self.rsc_classes: test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"systemd\" -T \"lrmd_dummy_daemon\"", "resource-agent name=\"lrmd_dummy_daemon\"") ### get metadata ### if "upstart" in self.rsc_classes: test = self.new_test("get_upstart_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"upstart\" -T \"lrmd_dummy_daemon\"", "resource-agent name=\"lrmd_dummy_daemon\"") ### get ocf providers ### test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.") test.add_cmd_check_stdout("-c list_ocf_providers ", "pacemaker") test.add_cmd_check_stdout("-c list_ocf_providers -T ping", "pacemaker") ### Verify agents only exist in their lists ### test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.") test.add_cmd_check_stdout("-c list_agents ", "Stateful") ### ocf ### test.add_cmd_check_stdout("-c list_agents -C ocf", "Stateful") test.add_cmd_check_stdout("-c list_agents -C lsb", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "LSBDummy") ### init.d ### test.add_cmd_check_stdout("-c list_agents -C lsb", "LSBDummy") 
test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon") ### should not exist test.add_cmd_check_stdout("-c list_agents -C lsb", "", "fence_dummy_monitor") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "fence_dummy_monitor") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "fence_dummy_monitor") ### should not exist if "systemd" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon") ### systemd ### test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C systemd", "lrmd_dummy_daemon") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "fence_dummy_monitor") ### should not exist if "upstart" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon") ### upstart ### test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C upstart", "lrmd_dummy_daemon") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "fence_dummy_monitor") ### should not exist if "stonith" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents -C stonith", "fence_dummy_monitor") ### stonith ### test.add_cmd_check_stdout("-c list_agents -C stonith", "", "lrmd_dummy_daemon") ### should not exist test.add_cmd_check_stdout("-c list_agents -C stonith", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "fence_dummy_monitor") def print_list(self): """ List all registered tests """ print("\n==== %d TESTS FOUND ====" % (len(self.tests))) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self.tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def run_single(self, name): """ Run a single named test """ for test in self.tests: if test.name == name: test.run() break def run_tests_matching(self, pattern): """ Run all tests whose name matches a pattern """ for test in self.tests: if test.name.count(pattern) != 0: test.run() def run_tests(self): """ Run all tests """ for test in self.tests: test.run() def exit(self): """ Exit (with error status code if any test failed) """ for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: sys.exit(CrmExit.ERROR) sys.exit(CrmExit.OK) def print_results(self): """ Print summary of results of executed tests """ failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) class TestOptions(object): """ Option handler """ def __init__(self): self.options = {} self.options['list-tests'] = 0 self.options['run-all'] = 1 self.options['run-only'] = "" self.options['run-only-pattern'] = "" self.options['verbose'] = 0 self.options['invalid-arg'] = "" self.options['show-usage'] = 0 
self.options['pacemaker-remote'] = 0

    def build_options(self, argv):
        """ Set options based on command-line arguments """
        args = argv[1:]
        skip = 0
        for i in range(0, len(args)):
            if skip:
                skip = 0
                continue
            elif args[i] == "-h" or args[i] == "--help":
                self.options['show-usage'] = 1
            elif args[i] == "-l" or args[i] == "--list-tests":
                self.options['list-tests'] = 1
            elif args[i] == "-V" or args[i] == "--verbose":
                self.options['verbose'] = 1
            elif args[i] == "-R" or args[i] == "--pacemaker-remote":
                self.options['pacemaker-remote'] = 1
            elif args[i] == "-r" or args[i] == "--run-only":
                self.options['run-only'] = args[i+1]
                skip = 1
            elif args[i] == "-p" or args[i] == "--run-only-pattern":
                self.options['run-only-pattern'] = args[i+1]
                skip = 1

    def show_usage(self):
        """ Show command usage """
        print("usage: " + sys.argv[0] + " [options]")
        print("If no options are provided, all tests will run")
        print("Options:")
        print("\t [--help | -h] Show usage")
        print("\t [--list-tests | -l] Print out all registered tests.")
        print("\t [--run-only | -r 'testname'] Run a specific test")
        print("\t [--verbose | -V] Verbose output")
        print("\t [--pacemaker-remote | -R] Test pacemaker-remote binary instead of lrmd.")
        print("\t [--run-only-pattern | -p 'string'] Run only tests containing the string value")
        print("\n\tExample: Run only the test 'start_stop'")
        print("\t\t python ./regression.py --run-only start_stop")
        print("\n\tExample: Run only the tests with the string 'systemd' present in them")
        print("\t\t python ./regression.py --run-only-pattern systemd")

def main(argv):
    """ Run lrmd regression tests as specified by arguments """
    update_path()
    opts = TestOptions()
    opts.build_options(argv)
    tests = Tests(opts.options['verbose'], opts.options['pacemaker-remote'])
    tests.build_generic_tests()
    tests.build_multi_rsc_tests()
    tests.build_negative_tests()
    tests.build_custom_tests()
    tests.build_stress_tests()
    tests.setup_test_environment()
    print("Starting ...")
    if opts.options['list-tests']:
        tests.print_list()
    elif opts.options['show-usage']:
        opts.show_usage()
    elif opts.options['run-only-pattern'] != "":
        tests.run_tests_matching(opts.options['run-only-pattern'])
        tests.print_results()
    elif opts.options['run-only'] != "":
        tests.run_single(opts.options['run-only'])
        tests.print_results()
    else:
        tests.run_tests()
        tests.print_results()
    tests.cleanup_test_environment()
    tests.exit()

if __name__ == "__main__":
    main(sys.argv)
diff --git a/m4/CC_CHECK_LDFLAGS.m4 b/m4/CC_CHECK_LDFLAGS.m4
new file mode 100644
index 0000000000..be20df3a94
--- /dev/null
+++ b/m4/CC_CHECK_LDFLAGS.m4
@@ -0,0 +1,21 @@
+dnl Check if the flag is supported by linker (cacheable)
+dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND])
+dnl
+dnl Origin (declared license: GPLv2+ with less restrictive exception):
+dnl https://git.gnome.org/browse/glib/tree/m4macros/attributes.m4?h=2.49.1
+dnl (AC_LANG_PROGRAM substituted by Jan Pokorny)
+
+AC_DEFUN([CC_CHECK_LDFLAGS], [
+  AC_CACHE_CHECK([if $CC supports $1 flag],
+                 AS_TR_SH([cc_cv_ldflags_$1]),
+                 [ac_save_LDFLAGS="$LDFLAGS"
+                  LDFLAGS="$LDFLAGS $1"
+                  AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[]])],
+                                 [eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"],
+                                 [eval "AS_TR_SH([cc_cv_ldflags_$1])="])
+                  LDFLAGS="$ac_save_LDFLAGS"
+                 ])
+
+  AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes],
+        [$2], [$3])
+])
diff --git a/acinclude.m4 b/m4/PKG_CHECK_VAR.m4
similarity index 51%
rename from acinclude.m4
rename to m4/PKG_CHECK_VAR.m4
index 98ab8c6acd..2221a69eba 100644
--- a/acinclude.m4
+++ b/m4/PKG_CHECK_VAR.m4
@@ -1,51 +1,24 @@
-dnl -dnl local autoconf/automake macros for pacemaker -dnl - -dnl Check if the flag is supported by linker (cacheable) -dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) -dnl -dnl Origin (declared license: GPLv2+ with less restrictive exception): -dnl https://git.gnome.org/browse/glib/tree/m4macros/attributes.m4?h=2.49.1 -dnl (AC_LANG_PROGRAM substituted by Jan Pokorny ) - -AC_DEFUN([CC_CHECK_LDFLAGS], [ - AC_CACHE_CHECK([if $CC supports $1 flag], - AS_TR_SH([cc_cv_ldflags_$1]), - [ac_save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $1" - AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[]])], - [eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"], - [eval "AS_TR_SH([cc_cv_ldflags_$1])="]) - LDFLAGS="$ac_save_LDFLAGS" - ]) - - AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes], - [$2], [$3]) -]) - - dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE, dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------- dnl Since: 0.28 dnl dnl Retrieves the value of the pkg-config variable for the given module. dnl dnl Origin (declared license: GPLv2+ with less restrictive exception): dnl https://cgit.freedesktop.org/pkg-config/tree/pkg.m4.in?h=pkg-config-0.29.1#n261 dnl (AS_VAR_COPY replaced with backward-compatible equivalent and guard dnl to prefer system-wide variant by Jan Pokorny ) m4_ifndef([PKG_CHECK_VAR],[ AC_DEFUN([PKG_CHECK_VAR], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl _PKG_CONFIG([$1], [variable="][$3]["], [$2]) dnl AS_VAR_COPY([$1], [pkg_cv_][$1]) $1=AS_VAR_GET([pkg_cv_][$1]) AS_VAR_IF([$1], [""], [$5], [$4])dnl ])dnl PKG_CHECK_VAR ])dnl m4_ifndef diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c index 723f704a2c..11fac800d9 100644 --- a/mcp/pacemaker.c +++ b/mcp/pacemaker.c @@ -1,1121 +1,1128 @@ /* * Copyright (C) 2010 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include gboolean pcmk_quorate = FALSE; gboolean fatal_error = FALSE; GMainLoop *mainloop = NULL; #define PCMK_PROCESS_CHECK_INTERVAL 5 const char *local_name = NULL; uint32_t local_nodeid = 0; crm_trigger_t *shutdown_trigger = NULL; const char *pid_file = "/var/run/pacemaker.pid"; typedef struct pcmk_child_s { int pid; long flag; int start_seq; int respawn_count; gboolean respawn; const char *name; const char *uid; const char *command; gboolean active_before_startup; } pcmk_child_t; /* Index into the array below */ #define pcmk_child_crmd 3 /* *INDENT-OFF* */ static pcmk_child_t pcmk_children[] = { { 0, crm_proc_none, 0, 0, FALSE, "none", NULL, NULL }, { 0, crm_proc_lrmd, 3, 0, TRUE, "lrmd", NULL, CRM_DAEMON_DIR"/lrmd" }, { 0, crm_proc_cib, 1, 0, TRUE, "cib", CRM_DAEMON_USER, CRM_DAEMON_DIR"/cib" }, { 0, crm_proc_crmd, 6, 0, TRUE, "crmd", CRM_DAEMON_USER, CRM_DAEMON_DIR"/crmd" }, { 0, crm_proc_attrd, 4, 0, TRUE, "attrd", CRM_DAEMON_USER, CRM_DAEMON_DIR"/attrd" }, { 0, crm_proc_stonithd, 0, 0, TRUE, "stonithd", NULL, NULL }, { 0, crm_proc_pe, 5, 0, TRUE, "pengine", CRM_DAEMON_USER, CRM_DAEMON_DIR"/pengine" }, { 0, crm_proc_stonith_ng, 2, 0, TRUE, "stonith-ng", NULL, CRM_DAEMON_DIR"/stonithd" }, }; /* *INDENT-ON* */ static gboolean start_child(pcmk_child_t * child); static gboolean check_active_before_startup_processes(gpointer user_data); void update_process_clients(crm_client_t *client); void update_process_peers(void); void enable_crmd_as_root(gboolean enable) { if (enable) { pcmk_children[pcmk_child_crmd].uid = NULL; } else { pcmk_children[pcmk_child_crmd].uid = CRM_DAEMON_USER; } } static uint32_t get_process_list(void) { int lpc = 0; uint32_t procs = crm_get_cluster_proc(); for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) { if (pcmk_children[lpc].pid != 0) { procs |= pcmk_children[lpc].flag; } } return procs; } static void pcmk_process_exit(pcmk_child_t * child) { child->pid = 0; child->active_before_startup = FALSE; /* Broadcast the fact that one of our processes died ASAP * * Try to get some logging of the cause out first though * because we're probably about to get fenced * * Potentially do this only if respawn_count > N * to allow for local recovery */ update_node_processes(local_nodeid, NULL, get_process_list()); child->respawn_count += 1; if (child->respawn_count > MAX_RESPAWN) { crm_err("Child respawn count exceeded by %s", child->name); child->respawn = FALSE; } if (shutdown_trigger) { mainloop_set_trigger(shutdown_trigger); update_node_processes(local_nodeid, NULL, get_process_list()); } else if (child->respawn && crm_is_true(getenv("PCMK_fail_fast"))) { crm_err("Rebooting system because of %s", child->name); pcmk_panic(__FUNCTION__); } else if (child->respawn) { crm_notice("Respawning failed child process: %s", child->name); start_child(child); } } static void pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { pcmk_child_t *child = mainloop_child_userdata(p); const char *name = mainloop_child_name(p); if (signo) { do_crm_log(((signo == SIGKILL)? 
LOG_WARNING : LOG_ERR), "%s[%d] terminated with signal %d (core=%d)", name, pid, signo, core); } else { switch(exitcode) { case CRM_EX_OK: crm_info("%s[%d] exited with status %d (%s)", name, pid, exitcode, crm_exit_str(exitcode)); break; case CRM_EX_FATAL: crm_warn("Shutting cluster down because %s[%d] had fatal failure", name, pid); child->respawn = FALSE; fatal_error = TRUE; pcmk_shutdown(SIGTERM); break; case CRM_EX_PANIC: do_crm_log_always(LOG_EMERG, "%s[%d] instructed the machine to reset", name, pid); child->respawn = FALSE; fatal_error = TRUE; pcmk_panic(__FUNCTION__); pcmk_shutdown(SIGTERM); break; default: crm_err("%s[%d] exited with status %d (%s)", name, pid, exitcode, crm_exit_str(exitcode)); break; } } pcmk_process_exit(child); } static gboolean stop_child(pcmk_child_t * child, int signal) { if (signal == 0) { signal = SIGTERM; } if (child->command == NULL) { crm_debug("Nothing to do for child \"%s\"", child->name); return TRUE; } if (child->pid <= 0) { crm_trace("Client %s not running", child->name); return TRUE; } errno = 0; if (kill(child->pid, signal) == 0) { crm_notice("Stopping %s "CRM_XS" sent signal %d to process %d", child->name, signal, child->pid); } else { crm_perror(LOG_ERR, "Could not stop %s (process %d) with signal %d", child->name, child->pid, signal); } return TRUE; } static char *opts_default[] = { NULL, NULL }; static char *opts_vgrind[] = { NULL, NULL, NULL, NULL, NULL }; static gboolean start_child(pcmk_child_t * child) { int lpc = 0; uid_t uid = 0; gid_t gid = 0; struct rlimit oflimits; gboolean use_valgrind = FALSE; gboolean use_callgrind = FALSE; const char *devnull = "/dev/null"; const char *env_valgrind = getenv("PCMK_valgrind_enabled"); const char *env_callgrind = getenv("PCMK_callgrind_enabled"); child->active_before_startup = FALSE; if (child->command == NULL) { crm_info("Nothing to do for child \"%s\"", child->name); return TRUE; } if (env_callgrind != NULL && crm_is_true(env_callgrind)) { use_callgrind = TRUE; use_valgrind = TRUE; } else if (env_callgrind != NULL && strstr(env_callgrind, child->name)) { use_callgrind = TRUE; use_valgrind = TRUE; } else if (env_valgrind != NULL && crm_is_true(env_valgrind)) { use_valgrind = TRUE; } else if (env_valgrind != NULL && strstr(env_valgrind, child->name)) { use_valgrind = TRUE; } if (use_valgrind && strlen(VALGRIND_BIN) == 0) { crm_warn("Cannot enable valgrind for %s:" " The location of the valgrind binary is unknown", child->name); use_valgrind = FALSE; } if (child->uid) { if (crm_user_lookup(child->uid, &uid, &gid) < 0) { crm_err("Invalid user (%s) for %s: not found", child->uid, child->name); return FALSE; } crm_info("Using uid=%u and group=%u for process %s", uid, gid, child->name); } child->pid = fork(); CRM_ASSERT(child->pid != -1); if (child->pid > 0) { /* parent */ mainloop_child_add(child->pid, 0, child->name, child, pcmk_child_exit); crm_info("Forked child %d for process %s%s", child->pid, child->name, use_valgrind ? 
" (valgrind enabled: " VALGRIND_BIN ")" : ""); update_node_processes(local_nodeid, NULL, get_process_list()); return TRUE; } else { /* Start a new session */ (void)setsid(); /* Setup the two alternate arg arrays */ opts_vgrind[0] = strdup(VALGRIND_BIN); if (use_callgrind) { opts_vgrind[1] = strdup("--tool=callgrind"); opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p"); opts_vgrind[3] = strdup(child->command); opts_vgrind[4] = NULL; } else { opts_vgrind[1] = strdup(child->command); opts_vgrind[2] = NULL; opts_vgrind[3] = NULL; opts_vgrind[4] = NULL; } opts_default[0] = strdup(child->command); if(gid) { + // Whether we need root group access to talk to cluster layer + bool need_root_group = TRUE; + if (is_corosync_cluster()) { - /* Drop root privileges completely - * - * We can do this because we set uidgid.gid.${gid}=1 - * via CMAP which allows these processes to connect to - * corosync + /* Corosync clusters can drop root group access, because we set + * uidgid.gid.${gid}=1 via CMAP, which allows these processes to + * connect to corosync. */ - if (setgid(gid) < 0) { - crm_perror(LOG_ERR, "Could not set group to %d", gid); - } + need_root_group = FALSE; + } + + // Drop root group access if not needed + if (!need_root_group && (setgid(gid) < 0)) { + crm_perror(LOG_ERR, "Could not set group to %d", gid); + } - // Keep root group, but add haclient group so we can access ipc - } else if (initgroups(child->uid, gid) < 0) { + /* Initialize supplementary groups to only those always granted to + * the user, plus haclient (so we can access IPC). + */ + if (initgroups(child->uid, gid) < 0) { crm_err("Cannot initialize groups for %s: %s (%d)", child->uid, pcmk_strerror(errno), errno); } } if (uid && setuid(uid) < 0) { crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid); } /* Close all open file descriptors */ getrlimit(RLIMIT_NOFILE, &oflimits); for (lpc = 0; lpc < oflimits.rlim_cur; lpc++) { close(lpc); } (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ if (use_valgrind) { (void)execvp(VALGRIND_BIN, opts_vgrind); } else { (void)execvp(child->command, opts_default); } crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command); crm_exit(CRM_EX_FATAL); } return TRUE; /* never reached */ } static gboolean escalate_shutdown(gpointer data) { pcmk_child_t *child = data; if (child->pid) { /* Use SIGSEGV instead of SIGKILL to create a core so we can see what it was up to */ crm_err("Child %s not terminating in a timely manner, forcing", child->name); stop_child(child, SIGSEGV); } return FALSE; } static gboolean pcmk_shutdown_worker(gpointer user_data) { static int phase = 0; static time_t next_log = 0; static int max = SIZEOF(pcmk_children); int lpc = 0; if (phase == 0) { crm_notice("Shutting down Pacemaker"); phase = max; /* Add a second, more frequent, check to speed up shutdown */ g_timeout_add_seconds(5, check_active_before_startup_processes, NULL); } for (; phase > 0; phase--) { /* Don't stop anything with start_seq < 1 */ for (lpc = max - 1; lpc >= 0; lpc--) { pcmk_child_t *child = &(pcmk_children[lpc]); if (phase != child->start_seq) { continue; } if (child->pid) { time_t now = time(NULL); if (child->respawn) { next_log = now + 30; child->respawn = FALSE; stop_child(child, SIGTERM); if (phase < pcmk_children[pcmk_child_crmd].start_seq) { g_timeout_add(180000 /* 3m */ , escalate_shutdown, child); } } else if (now >= next_log) { next_log = now + 30; 
crm_notice("Still waiting for %s to terminate " CRM_XS " pid=%d seq=%d", child->name, child->pid, child->start_seq); } return TRUE; } /* cleanup */ crm_debug("%s confirmed stopped", child->name); child->pid = 0; } } /* send_cluster_id(); */ crm_notice("Shutdown complete"); { const char *delay = daemon_option("shutdown_delay"); if(delay) { sync(); sleep(crm_get_msec(delay) / 1000); } } g_main_loop_quit(mainloop); if (fatal_error) { crm_notice("Shutting down and staying down after fatal error"); crm_exit(CRM_EX_FATAL); } return TRUE; } static void pcmk_ignore(int nsig) { crm_info("Ignoring signal %s (%d)", strsignal(nsig), nsig); } static void pcmk_sigquit(int nsig) { pcmk_panic(__FUNCTION__); } void pcmk_shutdown(int nsig) { if (shutdown_trigger == NULL) { shutdown_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, pcmk_shutdown_worker, NULL); } mainloop_set_trigger(shutdown_trigger); } static int32_t pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (crm_client_new(c, uid, gid) == NULL) { return -EIO; } return 0; } static void pcmk_ipc_created(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); } /* Exit code means? */ static int32_t pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; const char *task = NULL; crm_client_t *c = crm_client_get(qbc); xmlNode *msg = crm_ipcs_recv(c, data, size, &id, &flags); crm_ipcs_send_ack(c, id, flags, "ack", __FUNCTION__, __LINE__); if (msg == NULL) { return 0; } task = crm_element_value(msg, F_CRM_TASK); if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) { /* Time to quit */ crm_notice("Shutting down in response to ticket %s (%s)", crm_element_value(msg, F_CRM_REFERENCE), crm_element_value(msg, F_CRM_ORIGIN)); pcmk_shutdown(15); } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) { /* Send to everyone */ struct iovec *iov; int id = 0; const char *name = NULL; crm_element_value_int(msg, XML_ATTR_ID, &id); name = crm_element_value(msg, XML_ATTR_UNAME); crm_notice("Instructing peers to remove references to node %s/%u", name, id); iov = calloc(1, sizeof(struct iovec)); iov->iov_base = dump_xml_unformatted(msg); iov->iov_len = 1 + strlen(iov->iov_base); send_cpg_iov(iov); } else { update_process_clients(c); } free_xml(msg); return 0; } /* Error code means? */ static int32_t pcmk_ipc_closed(qb_ipcs_connection_t * c) { crm_client_t *client = crm_client_get(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); crm_client_destroy(client); return 0; } static void pcmk_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pcmk_ipc_closed(c); } struct qb_ipcs_service_handlers mcp_ipc_callbacks = { .connection_accept = pcmk_ipc_accept, .connection_created = pcmk_ipc_created, .msg_process = pcmk_ipc_dispatch, .connection_closed = pcmk_ipc_closed, .connection_destroyed = pcmk_ipc_destroy }; /*! 
* \internal * \brief Send an XML message with process list of all known peers to client(s) * * \param[in] client Send message to this client, or all clients if NULL */ void update_process_clients(crm_client_t *client) { GHashTableIter iter; crm_node_t *node = NULL; xmlNode *update = create_xml_node(NULL, "nodes"); if (is_corosync_cluster()) { crm_xml_add_int(update, "quorate", pcmk_quorate); } g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { xmlNode *xml = create_xml_node(update, "node"); crm_xml_add_int(xml, "id", node->id); crm_xml_add(xml, "uname", node->uname); crm_xml_add(xml, "state", node->state); crm_xml_add_int(xml, "processes", node->processes); } if(client) { crm_trace("Sending process list to client %s", client->id); crm_ipcs_send(client, 0, update, crm_ipc_server_event); } else { crm_trace("Sending process list to %d clients", crm_hash_table_size(client_connections)); g_hash_table_iter_init(&iter, client_connections); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & client)) { crm_ipcs_send(client, 0, update, crm_ipc_server_event); } } free_xml(update); } /*! * \internal * \brief Send a CPG message with local node's process list to all peers */ void update_process_peers(void) { /* Do nothing for corosync-2 based clusters */ char buffer[1024]; struct iovec *iov; int rc = 0; if (local_name) { rc = snprintf(buffer, SIZEOF(buffer), "", local_name, get_process_list()); } else { rc = snprintf(buffer, SIZEOF(buffer), "", get_process_list()); } crm_trace("Sending %s", buffer); iov = calloc(1, sizeof(struct iovec)); iov->iov_base = strdup(buffer); iov->iov_len = rc + 1; send_cpg_iov(iov); } /*! * \internal * \brief Update a node's process list, notifying clients and peers if needed * * \param[in] id Node ID of affected node * \param[in] uname Uname of affected node * \param[in] procs Affected node's process list mask * * \return TRUE if the process list changed, FALSE otherwise */ gboolean update_node_processes(uint32_t id, const char *uname, uint32_t procs) { gboolean changed = FALSE; crm_node_t *node = crm_get_peer(id, uname); if (procs != 0) { if (procs != node->processes) { crm_debug("Node %s now has process list: %.32x (was %.32x)", node->uname, procs, node->processes); node->processes = procs; changed = TRUE; /* If local node's processes have changed, notify clients/peers */ if (id == local_nodeid) { update_process_clients(NULL); update_process_peers(); } } else { crm_trace("Node %s still has process list: %.32x", node->uname, procs); } } return changed; } /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"shutdown", 0, 0, 'S', "\tInstruct Pacemaker to shutdown on this machine"}, {"features", 0, 0, 'F', "\tDisplay the full version and list of features Pacemaker was built with"}, {"-spacer-", 1, 0, '-', "\nAdditional Options:"}, {"foreground", 0, 0, 'f', "\t(Ignored) Pacemaker always runs in the foreground"}, {"pid-file", 1, 0, 'p', "\t(Ignored) Daemon pid file location"}, {"standby", 0, 0, 's', "\tStart node in standby state"}, {NULL, 0, 0, 0} }; /* *INDENT-ON* */ static void mcp_chown(const char *path, uid_t uid, gid_t gid) { int rc = chown(path, uid, gid); if (rc < 0) { crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s", path, CRM_DAEMON_USER, gid, pcmk_strerror(errno)); } } static gboolean 
check_active_before_startup_processes(gpointer user_data) { int start_seq = 1, lpc = 0; static int max = SIZEOF(pcmk_children); gboolean keep_tracking = FALSE; for (start_seq = 1; start_seq < max; start_seq++) { for (lpc = 0; lpc < max; lpc++) { if (pcmk_children[lpc].active_before_startup == FALSE) { /* we are already tracking it as a child process. */ continue; } else if (start_seq != pcmk_children[lpc].start_seq) { continue; } else { const char *name = pcmk_children[lpc].name; if (pcmk_children[lpc].flag == crm_proc_stonith_ng) { name = "stonithd"; } if (crm_pid_active(pcmk_children[lpc].pid, name) != 1) { crm_notice("Process %s terminated (pid=%d)", name, pcmk_children[lpc].pid); pcmk_process_exit(&(pcmk_children[lpc])); continue; } } /* at least one of the processes found at startup * is still going, so keep this recurring timer around */ keep_tracking = TRUE; } } return keep_tracking; } static bool find_and_track_existing_processes(void) { DIR *dp; struct dirent *entry; int start_tracker = 0; char entry_name[64]; dp = opendir("/proc"); if (!dp) { /* no proc directory to search through */ crm_notice("Can not read /proc directory to track existing components"); return FALSE; } while ((entry = readdir(dp)) != NULL) { int pid; int max = SIZEOF(pcmk_children); int i; if (crm_procfs_process_info(entry, entry_name, &pid) < 0) { continue; } for (i = 0; i < max; i++) { const char *name = pcmk_children[i].name; if (pcmk_children[i].start_seq == 0) { continue; } if (pcmk_children[i].flag == crm_proc_stonith_ng) { name = "stonithd"; } if (safe_str_eq(entry_name, name) && (crm_pid_active(pid, NULL) == 1)) { crm_notice("Tracking existing %s process (pid=%d)", name, pid); pcmk_children[i].pid = pid; pcmk_children[i].active_before_startup = TRUE; start_tracker = 1; break; } } } if (start_tracker) { g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL, check_active_before_startup_processes, NULL); } closedir(dp); return start_tracker; } static void init_children_processes(void) { int start_seq = 1, lpc = 0; static int max = SIZEOF(pcmk_children); /* start any children that have not been detected */ for (start_seq = 1; start_seq < max; start_seq++) { /* don't start anything with start_seq < 1 */ for (lpc = 0; lpc < max; lpc++) { if (pcmk_children[lpc].pid) { /* we are already tracking it */ continue; } if (start_seq == pcmk_children[lpc].start_seq) { start_child(&(pcmk_children[lpc])); } } } /* From this point on, any daemons being started will be due to * respawning rather than node start. * * This may be useful for the daemons to know */ setenv("PCMK_respawned", "true", 1); } static void mcp_cpg_destroy(gpointer user_data) { crm_err("Connection destroyed"); crm_exit(CRM_EX_DISCONNECT); } /*! * \internal * \brief Process a CPG message (process list or manual peer cache removal) * * \param[in] handle CPG connection (ignored) * \param[in] groupName CPG group name (ignored) * \param[in] nodeid ID of affected node * \param[in] pid Process ID (ignored) * \param[in] msg CPG XML message * \param[in] msg_len Length of msg in bytes (ignored) */ static void mcp_cpg_deliver(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { xmlNode *xml = string2xml(msg); const char *task = crm_element_value(xml, F_CRM_TASK); crm_trace("Received CPG message (%s): %.200s", (task? 
task : "process list"), (char*)msg); if (task == NULL) { if (nodeid == local_nodeid) { crm_debug("Ignoring message with local node's process list"); } else { uint32_t procs = 0; const char *uname = crm_element_value(xml, "uname"); crm_element_value_int(xml, "proclist", (int *)&procs); if (update_node_processes(nodeid, uname, procs)) { update_process_clients(NULL); } } } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) { int id = 0; const char *name = NULL; crm_element_value_int(xml, XML_ATTR_ID, &id); name = crm_element_value(xml, XML_ATTR_UNAME); reap_crm_member(id, name); } if (xml != NULL) { free_xml(xml); } } static void mcp_cpg_membership(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries) { /* Update peer cache if needed */ pcmk_cpg_membership(handle, groupName, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries); /* Always broadcast our own presence after any membership change */ update_process_peers(); } static gboolean mcp_quorum_callback(unsigned long long seq, gboolean quorate) { pcmk_quorate = quorate; return TRUE; } static void mcp_quorum_destroy(gpointer user_data) { crm_info("connection lost"); } int main(int argc, char **argv) { int rc; int flag; int argerr = 0; int option_index = 0; gboolean shutdown = FALSE; uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; struct rlimit cores; crm_ipc_t *old_instance = NULL; qb_ipcs_service_t *ipcs = NULL; const char *facility = daemon_option("logfacility"); static crm_cluster_t cluster; crm_log_preinit(NULL, argc, argv); crm_set_options(NULL, "mode [options]", long_options, "Start/Stop Pacemaker\n"); mainloop_add_signal(SIGHUP, pcmk_ignore); mainloop_add_signal(SIGQUIT, pcmk_sigquit); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 'f': /* Legacy */ break; case 'p': pid_file = optarg; break; case 's': set_daemon_option("node_start_state", "standby"); break; case '$': case '?': crm_help(flag, CRM_EX_OK); break; case 'S': shutdown = TRUE; break; case 'F': printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURE_SET, CRM_FEATURES); crm_exit(CRM_EX_OK); default: printf("Argument code 0%o (%c) is not (?yet?) 
supported\n", flag, flag); ++argerr; break; } } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (argerr) { crm_help('?', CRM_EX_USAGE); } setenv("LC_ALL", "C", 1); set_daemon_option("mcp", "true"); crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); /* Restore the original facility so that mcp_read_config() does the right thing */ set_daemon_option("logfacility", facility); crm_debug("Checking for old instances of %s", CRM_SYSTEM_MCP); old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0); crm_ipc_connect(old_instance); if (shutdown) { crm_debug("Terminating previous instance"); while (crm_ipc_connected(old_instance)) { xmlNode *cmd = create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL); crm_debug("."); crm_ipc_send(old_instance, cmd, 0, 0, NULL); free_xml(cmd); sleep(2); } crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_exit(CRM_EX_OK); } else if (crm_ipc_connected(old_instance)) { crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_err("Pacemaker is already active, aborting startup"); crm_exit(CRM_EX_FATAL); } crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); if (mcp_read_config() == FALSE) { crm_notice("Could not obtain corosync config data, exiting"); crm_exit(CRM_EX_UNAVAILABLE); } crm_notice("Starting Pacemaker %s "CRM_XS" build=%s features:%s", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); mainloop = g_main_new(FALSE); sysrq_init(); rc = getrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_perror(LOG_ERR, "Cannot determine current maximum core size."); } else { if (cores.rlim_max == 0 && geteuid() == 0) { cores.rlim_max = RLIM_INFINITY; } else { crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max); } cores.rlim_cur = cores.rlim_max; rc = setrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_perror(LOG_ERR, "Core file generation will remain disabled." 
" Core files are an important diagnositic tool," " please consider enabling them by default."); } #if 0 /* system() is not thread-safe, can't call from here * Actually, it's a pretty hacky way to try and achieve this anyway */ if (system("echo 1 > /proc/sys/kernel/core_uses_pid") != 0) { crm_perror(LOG_ERR, "Could not enable /proc/sys/kernel/core_uses_pid"); } #endif } if (crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) < 0) { crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER); crm_exit(CRM_EX_NOUSER); } mkdir(CRM_STATE_DIR, 0750); mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); /* Used to store core/blackbox/pengine/cib files in */ crm_build_path(CRM_PACEMAKER_DIR, 0750); mcp_chown(CRM_PACEMAKER_DIR, pcmk_uid, pcmk_gid); /* Used to store core files in */ crm_build_path(CRM_CORE_DIR, 0750); mcp_chown(CRM_CORE_DIR, pcmk_uid, pcmk_gid); /* Used to store blackbox dumps in */ crm_build_path(CRM_BLACKBOX_DIR, 0750); mcp_chown(CRM_BLACKBOX_DIR, pcmk_uid, pcmk_gid); /* Used to store policy engine inputs in */ crm_build_path(PE_STATE_DIR, 0750); mcp_chown(PE_STATE_DIR, pcmk_uid, pcmk_gid); /* Used to store the cluster configuration */ crm_build_path(CRM_CONFIG_DIR, 0750); mcp_chown(CRM_CONFIG_DIR, pcmk_uid, pcmk_gid); /* Resource agent paths are constructed by the lrmd */ ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks); if (ipcs == NULL) { crm_err("Couldn't start IPC server"); crm_exit(CRM_EX_OSERR); } /* Allows us to block shutdown */ if (cluster_connect_cfg(&local_nodeid) == FALSE) { crm_err("Couldn't connect to Corosync's CFG service"); crm_exit(CRM_EX_PROTOCOL); } if(pcmk_locate_sbd() > 0) { setenv("PCMK_watchdog", "true", 1); } else { setenv("PCMK_watchdog", "false", 1); } find_and_track_existing_processes(); cluster.destroy = mcp_cpg_destroy; cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver; cluster.cpg.cpg_confchg_fn = mcp_cpg_membership; crm_set_autoreap(FALSE); rc = pcmk_ok; if (cluster_connect_cpg(&cluster) == FALSE) { crm_err("Couldn't connect to Corosync's CPG service"); rc = -ENOPROTOOPT; } else if (cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy) == FALSE) { rc = -ENOTCONN; } else { local_name = get_local_node_name(); update_node_processes(local_nodeid, local_name, get_process_list()); mainloop_add_signal(SIGTERM, pcmk_shutdown); mainloop_add_signal(SIGINT, pcmk_shutdown); init_children_processes(); crm_info("Starting mainloop"); g_main_run(mainloop); } if (ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } g_main_destroy(mainloop); cluster_disconnect_cpg(&cluster); cluster_disconnect_cfg(); crm_info("Exiting %s", crm_system_name); return crm_exit(crm_errno2exit(rc)); } diff --git a/pacemaker.spec.in b/pacemaker.spec.in index 4849ca2f08..d0198d6e2d 100644 --- a/pacemaker.spec.in +++ b/pacemaker.spec.in @@ -1,760 +1,763 @@ # Globals and defines to control package behavior (configure these as desired) ## User and group to use for nonprivileged services %global uname hacluster %global gname haclient ## Where to install Pacemaker documentation %global pcmk_docdir %{_docdir}/%{name} ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) %global pcmkversion 1.1.18 %global specversion 1 ## Upstream commit (or git tag, such as 
"Pacemaker-" plus the ## {pcmkversion} macro for an official release) to use for this package %global commit HEAD ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 +## Path to Python interpreter (leave commented to auto-detect, +## or uncomment and edit to use a specific version) +#%%global python_path /usr/bin/python%{python3_pkgversion} # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Short version of git commit %define shortcommit %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo ${c:10};; *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) ## Whether this is a tagged release %define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) ## Whether this is a release candidate (in case of a tagged release) %define pre_release %([ "%{tag_release}" -eq 0 ] || { case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; esac; }; echo $?) ## Turn off auto-compilation of python files outside site-packages directory, ## so that the -libs-devel package is multilib-compliant (no *.py[co] files) %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) ## Heuristic used to infer bleeding-edge deployments that are ## less likely to have working versions of the documentation tools %define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) ## Corosync version %define cs_version %(pkg-config corosync --modversion 2>/dev/null | awk -F . '{print $1}') ## Where to install python site libraries (currently, this uses the unversioned ## python_sitearch macro to get the default system python, but at some point, ## we should explicitly choose python2_sitearch or python3_sitearch -- or both) %define py_site %{?python_sitearch}%{!?python_sitearch:%( python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and ## for that there are only few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## - systemd-aware rpm as manifested with the presence of particular ## macro (rpm itself will trivially always be present when building) ## - existence of /usr/lib/os-release file, which is something heavily ## propagated by systemd project ## - when not good enough, there's always a possibility to check ## particular distro-specific macros (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{?!_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{?!__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) - # Definitions for backward compatibility with older RPM versions ## Ensure the license macro behaves consistently (older RPM will otherwise ## overwrite it once it encounters "License:"). 
Courtesy Jason Tibbitts: ## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 %if !%{defined _licensedir} %define description %{lua: rpm.define("license %doc") print("%description") } %endif # Define conditionals so that "rpmbuild --with <feature>" and # "rpmbuild --without <feature>" can enable and disable specific features ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Add option to create binaries with coverage analysis %bcond_with coverage ## Add option to skip generating documentation ## (the build tools aren't available everywhere) %bcond_without doc ## Add option to prefix package version with "0." ## (so later "official" packages will be considered updates) %bcond_with pre_release ## Add option to ship Upstart job files %bcond_with upstart_job ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif # Define the release version # (do not look at externally enforced pre-release flag for tagged releases # as only -rc tags, captured with the second condition, implies that then) %if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} %if 0%{pre_release} %define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) %else %define pcmk_release 0.%{specversion}.%{shortcommit}.git %endif %else %if 0%{tag_release} %define pcmk_release %{specversion} %else %define pcmk_release %{specversion}.%{shortcommit}.git %endif %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: http://www.clusterlabs.org Group: System Environment/Daemons # Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: # https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) AutoReqProv: on Requires: resource-agents Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cluster-libs = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if %{defined systemd_requires} %systemd_requires %endif # Pacemaker targets compatibility with python 2.7 and 3.2+ Requires: python >= 2.7 BuildRequires: python-devel >= 2.7 # Pacemaker requires a minimum libqb functionality Requires: libqb >= 0.13.0 BuildRequires: libqb-devel >= 0.13.0 # Basics required for the build (even if usually satisfied through other BRs) BuildRequires: coreutils findutils grep sed # Required for core functionality BuildRequires: automake autoconf libtool pkgconfig libtool-ltdl-devel BuildRequires: pkgconfig(glib-2.0) >= 2.16 BuildRequires: libxml2-devel libxslt-devel libuuid-devel BuildRequires: bzip2-devel pam-devel # Required for agent_config.h which specifies the correct scratch directory BuildRequires: resource-agents # Enables optional functionality BuildRequires: ncurses-devel docbook-style-xsl BuildRequires: bison byacc flex help2man gnutls-devel pkgconfig(dbus-1) %if
%{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync BuildRequires: corosynclib-devel %if %{with stonithd} BuildRequires: cluster-glue-libs-devel %endif ## (note no avoiding effect when building through non-customized mock) %if !%{bleeding} %if %{with doc} BuildRequires: publican inkscape asciidoc %endif %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --with(out) : coverage doc stonithd hardening pre_release profiling upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} Requires: perl-TimeDate %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{name}-libs License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons %description -n %{name}-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-libs package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package -n %{name}-cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} %description -n %{name}-cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if %{defined systemd_requires} %systemd_requires %endif %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package -n %{name}-libs-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{name}-cts = %{version}-%{release} Requires: %{name}-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: libtool-ltdl-devel%{?_isa} libuuid-devel%{?_isa} Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} Requires: bzip2-devel%{?_isa} glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} corosynclib-devel%{?_isa} %description -n %{name}-libs-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-libs-devel package contains headers and shared libraries for developing tools for Pacemaker.
# NOTE: can be noarch if lrmd_test is moved to another subpackage %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: python >= 2.7 Requires: %{name}-libs = %{version}-%{release} # systemd python bindings are separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 22 Requires: python2-systemd %else %if 0%{?fedora} > 20 || 0%{?rhel} > 6 Requires: systemd-python %endif %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA Summary: Documentation for Pacemaker Group: Documentation %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %prep %setup -q -n %{name}-%{commit} # Force the local time # # 'git' sets the file date to the date of the last commit. # This can result in files having been created in the future # when building on machines in timezones 'behind' the one the # commit occurred in - which seriously confuses 'make' find . -exec touch \{\} \; %build # Early versions of autotools (e.g. RHEL <= 5) do not support --docdir export docdir=%{pcmk_docdir} export systemdunitdir=%{?_unitdir}%{?!_unitdir:no} %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will # use its own defaults otherwise; if such hardenings are completely # undesired, rpmbuild using "--without hardening" # (or "--define '_without_hardening 1'") export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %endif ./autogen.sh %{configure} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{!?with_doc: --with-brand=} \ %{!?with_hardening: --disable-hardening} \ + %{?python_path: PYTHON=%{python_path}} \ --with-initdir=%{_initrddir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif make %{_smp_mflags} V=1 all %check -{ pengine/regression.sh --run one-or-more-unrunnnable-instances \ +{ pengine/regression.sh --run one-or-more-unrunnable-instances \ && tools/regression.sh \ && touch .CHECKED } 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint [ -f .CHECKED ] && rm -f -- .CHECKED || false %install rm -rf %{buildroot} make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 mcp/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf install -m 644 mcp/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf %endif %if %{defined _unitdir} mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif # Scripts that should be executable chmod a+x 
%{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py # These are not actually scripts find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x # Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # Do not package these either rm -f %{buildroot}/%{_libdir}/service_crm.so # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . -name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %clean rm -rf %{buildroot} %post %if %{defined _unitdir} %systemd_post pacemaker.service %else /sbin/chkconfig --add pacemaker || : %endif %preun %if %{defined _unitdir} %systemd_preun pacemaker.service %else /sbin/service pacemaker stop >/dev/null 2>&1 || : if [ $1 -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker || : fi %endif %postun %if %{defined _unitdir} %systemd_postun_with_restart pacemaker.service %endif %pre remote %if %{defined _unitdir} # Stop the service before anything is touched, and remember to restart # it as one of the last actions (compared to using systemd_postun_with_restart, # this avoids suicide when sbd is in use) systemctl --quiet is-active pacemaker_remote if [ $? -eq 0 ] ; then mkdir -p %{_localstatedir}/lib/rpm-state/%{name} touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote systemctl stop pacemaker_remote >/dev/null 2>&1 else rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post remote %if %{defined _unitdir} %systemd_post pacemaker_remote.service %else /sbin/chkconfig --add pacemaker_remote || : %endif %preun remote %if %{defined _unitdir} %systemd_preun pacemaker_remote.service %else /sbin/service pacemaker_remote stop >/dev/null 2>&1 || : if [ $1 -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker_remote || : fi %endif %postun remote %if %{defined _unitdir} # This next line is a no-op, because we stopped the service earlier, but # we leave it here because it allows us to revert to the standard behavior # in the future if desired %systemd_postun_with_restart pacemaker_remote.service # Explicitly take care of removing the flag-file(s) upon final removal if [ $1 -eq 0 ] ; then rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %posttrans remote %if %{defined _unitdir} if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then systemctl start pacemaker_remote >/dev/null 2>&1 rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post cli %if %{defined _unitdir} %systemd_post crm_mon.service %endif %preun cli %if %{defined _unitdir} %systemd_preun crm_mon.service %endif %postun cli %if %{defined _unitdir} %systemd_postun_with_restart crm_mon.service %endif %pre -n %{name}-libs getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189 getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname} exit 0 %post -n %{name}-libs -p /sbin/ldconfig %postun -n %{name}-libs -p /sbin/ldconfig %post -n %{name}-cluster-libs -p /sbin/ldconfig %postun -n %{name}-cluster-libs -p /sbin/ldconfig %files ########################################################### 
%defattr(-,root,root) %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %if %{defined _unitdir} %{_unitdir}/pacemaker.service %else %{_initrddir}/pacemaker %endif %exclude %{_libexecdir}/pacemaker/lrmd_test %exclude %{_sbindir}/pacemaker_remoted %{_libexecdir}/pacemaker/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/crm_node %{_sbindir}/fence_legacy %{_sbindir}/stonith_admin %doc %{_mandir}/man7/crmd.* %doc %{_mandir}/man7/pengine.* %doc %{_mandir}/man7/stonithd.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_o2cb.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_node.* %doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_legacy.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_mandir}/man8/stonith_admin.* %doc %{_datadir}/pacemaker/alerts %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine /usr/lib/ocf/resource.d/pacemaker/controld /usr/lib/ocf/resource.d/pacemaker/o2cb /usr/lib/ocf/resource.d/pacemaker/remote %if "%{?cs_version}" != "UNKNOWN" %if 0%{?cs_version} < 2 %{_libexecdir}/lcrso/pacemaker.lcrso %endif %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/pacemaker.conf %config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf %endif %files cli %defattr(-,root,root) %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/crm_mon.conf %endif %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm_resource %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %exclude %{_datadir}/pacemaker/alerts %exclude %{_datadir}/pacemaker/tests %{_datadir}/pacemaker %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude /usr/lib/ocf/resource.d/pacemaker/controld %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb %exclude /usr/lib/ocf/resource.d/pacemaker/remote %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/crmd.* %exclude %{_mandir}/man7/pengine.* %exclude %{_mandir}/man7/stonithd.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_node.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_legacy.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker_remoted.* %exclude %{_mandir}/man8/stonith_admin.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %files -n %{name}-libs %defattr(-,root,root) %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpengine.so.* %{_libdir}/libstonithd.so.* %{_libdir}/libtransitioner.so.* 
%license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles %files -n %{name}-cluster-libs %defattr(-,root,root) %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files remote %defattr(-,root,root) %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} # state directory is shared between the subpackets # let rpm take care of removing it once it isn't # referenced anymore and empty %ghost %dir %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker_remoted %{_mandir}/man8/pacemaker_remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files doc %defattr(-,root,root) %doc %{pcmk_docdir} %license licenses/CC-BY-SA-4.0 %files cts %defattr(-,root,root) %{py_site}/cts %{_datadir}/pacemaker/tests/cts %{_libexecdir}/pacemaker/lrmd_test %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files -n %{name}-libs-devel %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests/cts %{_datadir}/pacemaker/tests %{_includedir}/pacemaker %{_libdir}/*.so %if %{with coverage} %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %changelog diff --git a/pengine/container.c b/pengine/container.c index f5d916c805..a974d36466 100644 --- a/pengine/container.c +++ b/pengine/container.c @@ -1,971 +1,971 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <crm_internal.h> #include <crm/msg_xml.h> #include <allocate.h> #include <notif.h> #include <utils.h> #define VARIANT_CONTAINER 1 #include <lib/pengine/variant.h> static bool is_child_container_node(container_variant_data_t *data, pe_node_t *node) { for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(node->details == tuple->node->details) { return TRUE; } } return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); void distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes, int max, int per_host_max, pe_working_set_t * data_set); static GListPtr get_container_list(resource_t *rsc) { GListPtr containers = NULL; container_variant_data_t *data = NULL; if(rsc->variant == pe_container) { get_container_variant_data(data, rsc); for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; containers = g_list_append(containers, tuple->docker); } } return containers; } static GListPtr get_containers_or_children(resource_t *rsc) { GListPtr containers = NULL; container_variant_data_t *data = NULL; if(rsc->variant == pe_container) { get_container_variant_data(data, rsc); for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; containers = g_list_append(containers, tuple->docker); } return containers; } else { return rsc->children; } } static bool migration_threshold_reached(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { int fail_count, countdown; /* Migration threshold of 0 means never force away */ if (rsc->migration_threshold == 0) { return FALSE; } // If we're ignoring failures, also ignore the migration threshold if (is_set(rsc->flags, pe_rsc_failure_ignored)) { return FALSE; } /* If there are no failures, there's no need to force away */ fail_count = pe_get_failcount(node, rsc, NULL, pe_fc_effective|pe_fc_fillers, NULL, data_set); if (fail_count <= 0) { return FALSE; } /* How many more times recovery will be tried on this node */ countdown = QB_MAX(rsc->migration_threshold - fail_count, 0); if (countdown == 0) { crm_warn("Forcing %s away from %s after %d failures (max=%d)", rsc->id, node->details->uname, fail_count, rsc->migration_threshold); return TRUE; } crm_info("%s can fail %d more times on %s before being forced off", rsc->id, countdown, node->details->uname); return FALSE; } node_t * container_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set) { GListPtr containers = NULL; GListPtr nodes = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return NULL); get_container_variant_data(container_data, rsc); set_bit(rsc->flags, pe_rsc_allocating); containers = get_container_list(rsc); dump_node_scores(show_scores ?
0 : scores_log_level, rsc, __FUNCTION__, rsc->allowed_nodes); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL); containers = g_list_sort_with_data(containers, sort_clone_instance, data_set); distribute_children(rsc, containers, nodes, container_data->replicas, container_data->replicas_per_host, data_set); g_list_free(nodes); g_list_free(containers); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; pe_node_t *docker_host = tuple->docker->allocated_to; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->cmds->allocate(tuple->ip, prefer, data_set); } if(tuple->remote && is_remote_node(docker_host)) { /* We need 'nested' connection resources to be on the same * host because pacemaker-remoted only supports a single * active connection */ rsc_colocation_new("child-remote-with-docker-remote", NULL, INFINITY, tuple->remote, docker_host->details->remote_rsc, NULL, NULL, data_set); } if(tuple->remote) { tuple->remote->cmds->allocate(tuple->remote, prefer, data_set); } // Explicitly allocate tuple->child before the container->child if(tuple->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, tuple->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if(node->details != tuple->node->details) { node->weight = -INFINITY; } else if(migration_threshold_reached(tuple->child, node, data_set) == FALSE) { node->weight = INFINITY; } } set_bit(tuple->child->parent->flags, pe_rsc_allocating); tuple->child->cmds->allocate(tuple->child, tuple->node, data_set); clear_bit(tuple->child->parent->flags, pe_rsc_allocating); } } if(container_data->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, container_data->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if(is_child_container_node(container_data, node)) { node->weight = 0; } else { node->weight = -INFINITY; } } container_data->child->cmds->allocate(container_data->child, prefer, data_set); } clear_bit(rsc->flags, pe_rsc_allocating); clear_bit(rsc->flags, pe_rsc_provisional); return NULL; } void container_create_actions(resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *action = NULL; GListPtr containers = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); containers = get_container_list(rsc); get_container_variant_data(container_data, rsc); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->cmds->create_actions(tuple->ip, data_set); } if(tuple->docker) { tuple->docker->cmds->create_actions(tuple->docker, data_set); } if(tuple->remote) { tuple->remote->cmds->create_actions(tuple->remote, data_set); } } clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set); if(container_data->child) { container_data->child->cmds->create_actions(container_data->child, data_set); if(container_data->child->variant == pe_master) { /* promote */ action = create_pseudo_resource_op(rsc, RSC_PROMOTE, TRUE, TRUE, data_set); action = create_pseudo_resource_op(rsc, RSC_PROMOTED, TRUE, TRUE, data_set); action->priority = INFINITY; /* demote */ action = create_pseudo_resource_op(rsc, RSC_DEMOTE, TRUE, TRUE, data_set); action = create_pseudo_resource_op(rsc, RSC_DEMOTED, TRUE, TRUE, data_set); 
action->priority = INFINITY; } } g_list_free(containers); } void container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); if(container_data->child) { new_rsc_order(rsc, RSC_START, container_data->child, RSC_START, pe_order_implies_first_printed, data_set); new_rsc_order(rsc, RSC_STOP, container_data->child, RSC_STOP, pe_order_implies_first_printed, data_set); if(container_data->child->children) { new_rsc_order(container_data->child, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(container_data->child, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } else { new_rsc_order(container_data->child, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(container_data->child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); CRM_ASSERT(tuple->docker); tuple->docker->cmds->internal_constraints(tuple->docker, data_set); order_start_start(rsc, tuple->docker, pe_order_runnable_left | pe_order_implies_first_printed); if(tuple->child) { order_stop_stop(rsc, tuple->child, pe_order_implies_first_printed); } order_stop_stop(rsc, tuple->docker, pe_order_implies_first_printed); new_rsc_order(tuple->docker, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); new_rsc_order(tuple->docker, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if(tuple->ip) { tuple->ip->cmds->internal_constraints(tuple->ip, data_set); // Start ip then docker new_rsc_order(tuple->ip, RSC_START, tuple->docker, RSC_START, pe_order_runnable_left|pe_order_preserve, data_set); new_rsc_order(tuple->docker, RSC_STOP, tuple->ip, RSC_STOP, pe_order_implies_first|pe_order_preserve, data_set); rsc_colocation_new("ip-with-docker", NULL, INFINITY, tuple->ip, tuple->docker, NULL, NULL, data_set); } if(tuple->remote) { /* This handles ordering and colocating remote relative to docker * (via "resource-with-container"). Since IP is also ordered and * colocated relative to docker, we don't need to do anything * explicit here with IP. 
*/ tuple->remote->cmds->internal_constraints(tuple->remote, data_set); } if(tuple->child) { CRM_ASSERT(tuple->remote); // Start of the remote then child is implicit in the PE's remote logic } } if(container_data->child) { container_data->child->cmds->internal_constraints(container_data->child, data_set); if(container_data->child->variant == pe_master) { master_promotion_constraints(rsc, data_set); /* child demoted before global demoted */ new_rsc_order(container_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set); /* global demote before child demote */ new_rsc_order(rsc, RSC_DEMOTE, container_data->child, RSC_DEMOTE, pe_order_implies_first_printed, data_set); /* child promoted before global promoted */ new_rsc_order(container_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed, data_set); /* global promote before child promote */ new_rsc_order(rsc, RSC_PROMOTE, container_data->child, RSC_PROMOTE, pe_order_implies_first_printed, data_set); } } else { // int type = pe_order_optional | pe_order_implies_then | pe_order_restart; // custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL, // rsc, generate_op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); } } static resource_t * find_compatible_tuple_by_node(resource_t * rsc_lh, node_t * candidate, resource_t * rsc, enum rsc_role_e filter, gboolean current) { container_variant_data_t *container_data = NULL; CRM_CHECK(candidate != NULL, return NULL); get_container_variant_data(container_data, rsc); crm_trace("Looking for compatible child from %s for %s on %s", rsc_lh->id, rsc->id, candidate->details->uname); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(is_child_compatible(tuple->docker, candidate, filter, current)) { crm_trace("Pairing %s with %s on %s", rsc_lh->id, tuple->docker->id, candidate->details->uname); return tuple->docker; } } crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id); return NULL; } static resource_t * find_compatible_tuple(resource_t *rsc_lh, resource_t * rsc, enum rsc_role_e filter, gboolean current) { GListPtr scratch = NULL; resource_t *pair = NULL; node_t *active_node_lh = NULL; active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current); if (active_node_lh) { return find_compatible_tuple_by_node(rsc_lh, active_node_lh, rsc, filter, current); } scratch = g_hash_table_get_values(rsc_lh->allowed_nodes); scratch = g_list_sort_with_data(scratch, sort_node_weight, NULL); for (GListPtr gIter = scratch; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; pair = find_compatible_tuple_by_node(rsc_lh, node, rsc, filter, current); if (pair) { goto done; } } - pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, rsc->id); + pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none")); done: g_list_free(scratch); return pair; } void container_rsc_colocation_lh(resource_t * rsc, resource_t * rsc_rh, rsc_colocation_t * constraint) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ CRM_ASSERT(FALSE); } int copies_per_node(resource_t * rsc) { /* Strictly speaking, there should be a 'copies_per_node' addition * to the resource function table and each case would be a * function. However that would be serious overkill to return an * int. 
In fact, it seems to me that both function tables * could/should be replaced by resources.{c,h} full of * rsc_{some_operation} functions containing a switch as below * which calls out to functions named {variant}_{some_operation} * as needed. */ switch(rsc->variant) { case pe_unknown: return 0; case pe_native: case pe_group: return 1; case pe_clone: case pe_master: { const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); return crm_parse_int(max_clones_node, "1"); } case pe_container: { container_variant_data_t *data = NULL; get_container_variant_data(data, rsc); return data->replicas_per_host; } } return 0; } void container_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc, rsc_colocation_t * constraint) { GListPtr allocated_rhs = NULL; container_variant_data_t *container_data = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(rsc_lh != NULL, pe_err("rsc_lh was NULL for %s", constraint->id); return); CRM_CHECK(rsc != NULL, pe_err("rsc was NULL for %s", constraint->id); return); CRM_ASSERT(rsc_lh->variant == pe_native); if (is_set(rsc->flags, pe_rsc_provisional)) { pe_rsc_trace(rsc, "%s is still provisional", rsc->id); return; } else if(constraint->rsc_lh->variant > pe_group) { resource_t *rh_child = find_compatible_tuple(rsc_lh, rsc, RSC_ROLE_UNKNOWN, FALSE); if (rh_child) { pe_rsc_debug(rsc, "Pairing %s with %s", rsc_lh->id, rh_child->id); rsc_lh->cmds->rsc_colocation_lh(rsc_lh, rh_child, constraint); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", rsc_lh->id, rsc->id); assign_node(rsc_lh, NULL, TRUE); } else { pe_rsc_debug(rsc, "Cannot pair %s with instance of %s", rsc_lh->id, rsc->id); } return; } get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Processing constraint %s: %s -> %s %d", constraint->id, rsc_lh->id, rsc->id, constraint->score); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if (constraint->score < INFINITY) { tuple->docker->cmds->rsc_colocation_rh(rsc_lh, tuple->docker, constraint); } else { node_t *chosen = tuple->docker->fns->location(tuple->docker, NULL, FALSE); if (chosen != NULL && is_set_recursive(tuple->docker, pe_rsc_block, TRUE) == FALSE) { pe_rsc_trace(rsc, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); allocated_rhs = g_list_prepend(allocated_rhs, chosen); } } } if (constraint->score >= INFINITY) { node_list_exclude(rsc_lh->allowed_nodes, allocated_rhs, FALSE); } g_list_free(allocated_rhs); } enum pe_action_flags container_action_flags(action_t * action, node_t * node) { GListPtr containers = NULL; enum pe_action_flags flags = 0; container_variant_data_t *data = NULL; get_container_variant_data(data, action->rsc); if(data->child) { enum action_tasks task = get_complex_task(data->child, action->task, TRUE); switch(task) { case no_action: case action_notify: case action_notified: case action_promote: case action_promoted: case action_demote: case action_demoted: return summary_action_flags(action, data->child->children, node); default: break; } } containers = get_container_list(action->rsc); flags = summary_action_flags(action, containers, node); g_list_free(containers); return flags; } resource_t * find_compatible_child_by_node(resource_t * local_child, node_t * local_node, resource_t * rsc, enum rsc_role_e filter, gboolean current) { GListPtr gIter = NULL; GListPtr children = NULL; if (local_node == NULL) { 
crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } crm_trace("Looking for compatible child from %s for %s on %s", local_child->id, rsc->id, local_node->details->uname); children = get_containers_or_children(rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if(is_child_compatible(child_rsc, local_node, filter, current)) { crm_trace("Pairing %s with %s on %s", local_child->id, child_rsc->id, local_node->details->uname); return child_rsc; } } crm_trace("Can't pair %s with %s", local_child->id, rsc->id); if(children != rsc->children) { g_list_free(children); } return NULL; } static container_grouping_t * tuple_for_docker(resource_t *rsc, resource_t *docker, node_t *node) { if(rsc->variant == pe_container) { container_variant_data_t *data = NULL; get_container_variant_data(data, rsc); for (GListPtr gIter = data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if(tuple->child && docker == tuple->docker && node->details == tuple->node->details) { return tuple; } } } return NULL; } static enum pe_graph_flags container_update_interleave_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { GListPtr gIter = NULL; GListPtr children = NULL; gboolean current = FALSE; enum pe_graph_flags changed = pe_graph_none; /* Fix this - lazy */ if (crm_ends_with(first->uuid, "_stopped_0") || crm_ends_with(first->uuid, "_demoted_0")) { current = TRUE; } children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t *) gIter->data; resource_t *first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current); if (first_child == NULL && current) { crm_trace("Ignore"); } else if (first_child == NULL) { crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid); /* Me no like this hack - but what else can we do? * * If there is no-one active or about to be active * on the same node as then_child, then they must * not be allowed to start */ if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) { pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id); if(assign_node(then_child, NULL, TRUE)) { changed |= pe_graph_updated_then; } } } else { pe_action_t *first_action = NULL; pe_action_t *then_action = NULL; enum action_tasks task = clone_child_action(first); const char *first_task = task2text(task); container_grouping_t *first_tuple = tuple_for_docker(first->rsc, first_child, node); container_grouping_t *then_tuple = tuple_for_docker(then->rsc, then_child, node); if(strstr(first->task, "stop") && first_tuple && first_tuple->child) { /* Except for 'stopped' we should be looking at the * in-container resource, actions for the child will * happen later and are therefor more likely to align * with the user's intent. 
*/ first_action = find_first_action(first_tuple->child->actions, NULL, task2text(task), node); } else { first_action = find_first_action(first_child->actions, NULL, task2text(task), node); } if(strstr(then->task, "mote") && then_tuple && then_tuple->child) { /* Promote/demote actions will never be found for the * docker resource, look in the child instead * * Alternatively treat: * 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and * 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY' */ then_action = find_first_action(then_tuple->child->actions, NULL, then->task, node); } else { then_action = find_first_action(then_child->actions, NULL, then->task, node); } if (first_action == NULL) { if (is_not_set(first_child->flags, pe_rsc_orphan) && crm_str_eq(first_task, RSC_STOP, TRUE) == FALSE && crm_str_eq(first_task, RSC_DEMOTE, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (first)", first_task, first_child->id); } else { crm_trace("No action found for %s in %s%s (first)", first_task, first_child->id, is_set(first_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : ""); } continue; } /* We're only interested if 'then' is neither stopping nor being demoted */ if (then_action == NULL) { if (is_not_set(then_child->flags, pe_rsc_orphan) && crm_str_eq(then->task, RSC_STOP, TRUE) == FALSE && crm_str_eq(then->task, RSC_DEMOTE, TRUE) == FALSE) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } else { crm_trace("No action found for %s in %s%s (then)", then->task, then_child->id, is_set(then_child->flags, pe_rsc_orphan) ? " (ORPHAN)" : ""); } continue; } if (order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x", first_action->uuid, is_set(first_action->flags, pe_action_optional), then_action->uuid, is_set(then_action->flags, pe_action_optional), type); changed |= (pe_graph_updated_first | pe_graph_updated_then); } if(first_action && then_action) { changed |= then_child->cmds->update_actions(first_action, then_action, node, first_child->cmds->action_flags(first_action, node), filter, type); } else { crm_err("Nothing found either for %s (%p) or %s (%p) %s", first_child->id, first_action, then_child->id, then_action, task2text(task)); } } } if(children != then->rsc->children) { g_list_free(children); } return changed; } bool can_interleave_actions(pe_action_t *first, pe_action_t *then) { bool interleave = FALSE; resource_t *rsc = NULL; const char *interleave_s = NULL; if(first->rsc == NULL || then->rsc == NULL) { crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc == then->rsc) { crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) { crm_trace("Not interleaving %s with %s (both sides must be clones, masters, or bundles)", first->uuid, then->uuid); return FALSE; } if (crm_ends_with(then->uuid, "_stop_0") || crm_ends_with(then->uuid, "_demote_0")) { rsc = first->rsc; } else { rsc = then->rsc; } interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave ? 
"yes" : "no", rsc->id); return interleave; } enum pe_graph_flags container_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; crm_trace("%s -> %s", first->uuid, then->uuid); if(can_interleave_actions(first, then)) { changed = container_update_interleave_actions(first, then, node, flags, filter, type); } else if(then->rsc) { GListPtr gIter = NULL; GListPtr children = NULL; // Handle the 'primitive' ordering case changed |= native_update_actions(first, then, node, flags, filter, type); // Now any children (or containers in the case of a bundle) children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { resource_t *then_child = (resource_t *) gIter->data; enum pe_graph_flags then_child_changed = pe_graph_none; action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node); if (then_child_action) { enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node); if (is_set(then_child_flags, pe_action_runnable)) { then_child_changed |= then_child->cmds->update_actions(first, then_child_action, node, flags, filter, type); } changed |= then_child_changed; if (then_child_changed & pe_graph_updated_then) { for (GListPtr lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *next = (action_wrapper_t *) lpc->data; update_action(next->action); } } } } if(children != then->rsc->children) { g_list_free(children); } } return changed; } void container_rsc_location(resource_t * rsc, rsc_to_node_t * constraint) { container_variant_data_t *container_data = NULL; get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; if (tuple->docker) { tuple->docker->cmds->rsc_location(tuple->docker, constraint); } if(tuple->ip) { tuple->ip->cmds->rsc_location(tuple->ip, constraint); } } if(container_data->child && (constraint->role_filter == RSC_ROLE_SLAVE || constraint->role_filter == RSC_ROLE_MASTER)) { container_data->child->cmds->rsc_location(container_data->child, constraint); container_data->child->rsc_location = g_list_prepend(container_data->child->rsc_location, constraint); } } void container_expand(resource_t * rsc, pe_working_set_t * data_set) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); if(container_data->child) { container_data->child->cmds->expand(container_data->child, data_set); } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if (tuple->remote && tuple->docker && container_fix_remote_addr(tuple->remote)) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside xmlNode *nvpair = get_xpath_object("//nvpair[@name='addr']", tuple->remote->xml, LOG_ERR); const char *calculated_addr = container_fix_remote_addr_in(tuple->remote, nvpair, "value"); if (calculated_addr) { crm_trace("Fixed addr for %s on %s", tuple->remote->id, calculated_addr); g_hash_table_replace(tuple->remote->parameters, strdup("addr"), 
void
container_expand(resource_t * rsc, pe_working_set_t * data_set)
{
    container_variant_data_t *container_data = NULL;

    CRM_CHECK(rsc != NULL, return);

    get_container_variant_data(container_data, rsc);

    if(container_data->child) {
        container_data->child->cmds->expand(container_data->child, data_set);
    }

    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if (tuple->remote && tuple->docker && container_fix_remote_addr(tuple->remote)) {
            // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
            xmlNode *nvpair = get_xpath_object("//nvpair[@name='addr']", tuple->remote->xml, LOG_ERR);
            const char *calculated_addr = container_fix_remote_addr_in(tuple->remote, nvpair, "value");

            if (calculated_addr) {
                crm_trace("Fixed addr for %s on %s", tuple->remote->id, calculated_addr);
                g_hash_table_replace(tuple->remote->parameters, strdup("addr"),
                                     strdup(calculated_addr));
            } else {
                crm_err("Could not fix addr for %s", tuple->remote->id);
            }
        }
        if(tuple->ip) {
            tuple->ip->cmds->expand(tuple->ip, data_set);
        }
        if(tuple->docker) {
            tuple->docker->cmds->expand(tuple->docker, data_set);
        }
        if(tuple->remote) {
            tuple->remote->cmds->expand(tuple->remote, data_set);
        }
    }
}

gboolean
container_create_probe(resource_t * rsc, node_t * node, action_t * complete,
                       gboolean force, pe_working_set_t * data_set)
{
    bool any_created = FALSE;
    container_variant_data_t *container_data = NULL;

    CRM_CHECK(rsc != NULL, return FALSE);

    get_container_variant_data(container_data, rsc);
    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if(tuple->ip) {
            any_created |= tuple->ip->cmds->create_probe(tuple->ip, node, complete, force, data_set);
        }
        if(tuple->child && node->details == tuple->node->details) {
            any_created |= tuple->child->cmds->create_probe(tuple->child, node, complete, force, data_set);
        }
        if(tuple->docker) {
            bool created = tuple->docker->cmds->create_probe(tuple->docker, node, complete, force, data_set);

            if(created) {
                any_created = TRUE;
                /* If we're limited to one replica per host (due to
                 * the lack of an IP range probably), then we don't
                 * want any of our peer containers starting until
                 * we've established that no other copies are already
                 * running.
                 *
                 * Partly this is to ensure that replicas_per_host is
                 * observed, but also to ensure that the containers
                 * don't fail to start because the necessary port
                 * mappings (which won't include an IP for uniqueness)
                 * are already taken
                 */
                for (GListPtr tIter = container_data->tuples;
                     tIter != NULL && container_data->replicas_per_host == 1;
                     tIter = tIter->next) {
                    container_grouping_t *other = (container_grouping_t *)tIter->data;

                    if ((other != tuple) && (other != NULL)
                        && (other->docker != NULL)) {

                        custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_STATUS, 0), NULL,
                                            other->docker, generate_op_key(other->docker->id, RSC_START, 0), NULL,
                                            pe_order_optional|pe_order_same_node, data_set);
                    }
                }
            }
        }
        if (tuple->docker && tuple->remote
            && tuple->remote->cmds->create_probe(tuple->remote, node, complete, force, data_set)) {

            /* Do not probe the remote resource until we know where docker is running
             * Required for REMOTE_CONTAINER_HACK to correctly probe remote resources
             */
            char *probe_uuid = generate_op_key(tuple->remote->id, RSC_STATUS, 0);
            action_t *probe = find_first_action(tuple->remote->actions, probe_uuid, NULL, node);

            free(probe_uuid);
            if (probe) {
                any_created = TRUE;
                crm_trace("Ordering %s probe on %s", tuple->remote->id, node->details->uname);
                custom_action_order(tuple->docker, generate_op_key(tuple->docker->id, RSC_START, 0), NULL,
                                    tuple->remote, NULL, probe,
                                    pe_order_probe, data_set);
            }
        }
    }
    return any_created;
}

void
container_append_meta(resource_t * rsc, xmlNode * xml)
{
}

GHashTable *
container_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                        float factor, enum pe_weights flags)
{
    return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
}
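A standalone sketch of the probe serialization in container_create_probe()
above: with replicas_per_host == 1, each replica's container probe
(monitor_0) is ordered before every *other* replica's container start, so no
peer starts until we know the host is free. It is not part of the patch, and
the resource names are hypothetical:

#include <stdio.h>

#define N_REPLICAS 3

int main(void)
{
    const char *docker_id[N_REPLICAS] = {
        "httpd-docker-0", "httpd-docker-1", "httpd-docker-2"
    };
    int replicas_per_host = 1;

    for (int i = 0; i < N_REPLICAS; i++) {
        /* mirrors the inner tIter loop, which only runs when
         * replicas_per_host == 1 */
        for (int j = 0; replicas_per_host == 1 && j < N_REPLICAS; j++) {
            if (j != i) {
                /* corresponds to custom_action_order(tuple->docker,
                 * RSC_STATUS, ..., other->docker, RSC_START, ...) */
                printf("order: %s_monitor_0 -> %s_start_0 (optional, same-node)\n",
                       docker_id[i], docker_id[j]);
            }
        }
    }
    return 0;
}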
void
container_LogActions(
    resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
{
    container_variant_data_t *container_data = NULL;

    CRM_CHECK(rsc != NULL, return);

    get_container_variant_data(container_data, rsc);
    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if(tuple->ip) {
            LogActions(tuple->ip, data_set, terminal);
        }
        if(tuple->docker) {
            LogActions(tuple->docker, data_set, terminal);
        }
        if(tuple->remote) {
            LogActions(tuple->remote, data_set, terminal);
        }
        if(tuple->child) {
            LogActions(tuple->child, data_set, terminal);
        }
    }
}
diff --git a/xml/constraints-3.0.rng b/xml/constraints-3.0.rng
new file mode 100644
index 0000000000..3698814e2f
--- /dev/null
+++ b/xml/constraints-3.0.rng
@@ -0,0 +1,251 @@
[The RelaxNG markup of this new 251-line schema did not survive extraction;
only its text nodes remain. The recoverable enumerated values, in order of
appearance, are: "group"/"listed" (presumably the resource-set ordering),
"stop"/"demote"/"fence"/"freeze" (presumably the ticket loss-policy),
"always"/"never"/"exclusive" (presumably resource-discovery),
"start"/"promote"/"demote"/"stop" (action values),
"Stopped"/"Started"/"Master"/"Slave" (role values), and
"Optional"/"Mandatory"/"Serialize" (presumably the rsc_order kind).]