diff --git a/configure.ac b/configure.ac index cce08b17a5..d89b57d7d3 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2073 +1,2065 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2020 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) AC_CONFIG_MACRO_DIR([m4]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd GLIB_TESTS dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AC_PROG_CC_STDC AC_PROG_CXX dnl C++ is not needed for build, just maintainer utilities dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that dnl the C compiler is working. ... The core part of the gnulib checks are done dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL dnl as a dependency. 
gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT LT_INIT([dlopen]) LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)], [RC=1; AC_MSG_RESULT(no)]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } dnl =============================================== dnl Configure Options dnl =============================================== dnl --enable-* options AC_ARG_ENABLE([ansi], [AS_HELP_STRING([--enable-ansi], [force GCC to compile to ANSI standard for older compilers. @<:@no@:>@])], ) AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@yes@:>@])], ) AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])], ) AC_ARG_ENABLE([no-stack], [AS_HELP_STRING([--enable-no-stack], [build only the scheduler and its requirements @<:@no@:>@])], ) AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart @<:@try@:>@])], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])], [], [enable_systemd=try], ) AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) # By default, we add symlinks at the pre-2.0.0 daemon name locations, so that: # (1) tools that directly invoke those names for metadata etc. will still work # (2) this installation can be used in a bundle container image used with # cluster hosts running Pacemaker 1.1.17+ # If you know your target systems will not have any need for it, you can # disable this option. Once the above use cases are no longer in wide use, we # can disable this option by default, and once we no longer want to support # them at all, we can drop the option altogether. 
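dnl As an illustration only -- the real rules live in the Makefiles, and the
dnl old/new name pairs here are examples rather than an authoritative list --
dnl the legacy links amount to install-time symlinks along these lines,
dnl guarded by the BUILD_LEGACY_LINKS conditional that the option below sets:
dnl
dnl   install-exec-hook:
dnl   if BUILD_LEGACY_LINKS
dnl       cd $(DESTDIR)$(CRM_DAEMON_DIR) && $(LN_S) -f pacemaker-based cib \
dnl           && $(LN_S) -f pacemaker-controld crmd
dnl   endif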
AC_ARG_ENABLE([legacy-links],
    [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names @<:@yes@:>@])],
    [ LEGACY_LINKS="${enableval}" ], [ LEGACY_LINKS=yes ],
)
AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes")

dnl --with-* options

AC_DEFUN([VERSION_ARG],
    [AC_ARG_WITH([version],
        [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])],
        [ PACKAGE_VERSION="$withval" ])]
)
VERSION_ARG(VERSION_NUMBER)

AC_ARG_WITH([corosync],
    [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer])],
    [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ],
)

AC_ARG_WITH([nagios],
    [AS_HELP_STRING([--with-nagios], [support nagios remote monitoring])],
    [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ],
)

AC_ARG_WITH([nagios-plugin-dir],
    [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])],
    [ NAGIOS_PLUGIN_DIR="$withval" ]
)

AC_ARG_WITH([nagios-metadata-dir],
    [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])],
    [ NAGIOS_METADATA_DIR="$withval" ]
)

AC_ARG_WITH([acl],
    [AS_HELP_STRING([--with-acl], [support CIB ACL])],
    [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ],
)

AC_ARG_WITH([cibsecrets],
    [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets])],
    [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ],
)

PCMK_GNUTLS_PRIORITIES="NORMAL"
AC_ARG_WITH([gnutls-priorities],
    [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])],
    [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ]
)

INITDIR=""
AC_ARG_WITH([initdir],
    [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])],
    [ INITDIR="$withval" ]
)

systemdsystemunitdir="${systemdsystemunitdir-}"
AC_ARG_WITH([systemdsystemunitdir],
    [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])],
    [ systemdsystemunitdir="$withval" ]
)

SUPPORT_PROFILING=0
AC_ARG_WITH([profiling],
    [AS_HELP_STRING([--with-profiling], [disable optimizations for effective profiling])],
    [ SUPPORT_PROFILING=$withval ]
)

AC_ARG_WITH([coverage],
    [AS_HELP_STRING([--with-coverage], [disable optimizations for effective coverage testing])],
    [ SUPPORT_COVERAGE=$withval ]
)

PUBLICAN_BRAND="common"
AC_ARG_WITH([brand],
    [AS_HELP_STRING([--with-brand=brand], [brand to use for generated documentation (set empty for no docs) @<:@common@:>@])],
    [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ]
)
AC_SUBST(PUBLICAN_BRAND)

BUG_URL=""
AC_ARG_WITH([bug-url],
    [AS_HELP_STRING([--with-bug-url=URL], [address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@])],
    [ BUG_URL="$withval" ]
)

CONFIGDIR=""
AC_ARG_WITH([configdir],
    [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])],
    [ CONFIGDIR="$withval" ]
)

CRM_LOG_DIR=""
AC_ARG_WITH([logdir],
    [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])],
    [ CRM_LOG_DIR="$withval" ]
)

CRM_BUNDLE_DIR=""
AC_ARG_WITH([bundledir],
    [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])],
    [ CRM_BUNDLE_DIR="$withval" ]
)

dnl The not-yet-released autoconf 2.70 will have a --runstatedir option.
dnl Until that's available, emulate it with our own --with-runstatedir.
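dnl In sketch form, the precedence implemented further below (in General
dnl Processing) is: a native autoconf --runstatedir wins if present, then
dnl our --with-runstatedir, then the LOCALSTATEDIR/run default:
dnl
dnl   if test "x${runstatedir}" = "x"; then            # no native option
dnl       if test "x${pcmk_runstatedir}" = "x"; then
dnl           runstatedir="${localstatedir}/run"       # default
dnl       else
dnl           runstatedir="${pcmk_runstatedir}"        # --with-runstatedir
dnl       fi
dnl   fi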
pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) dnl This defaults to /usr/lib rather than libdir because it's determined by the dnl OCF project and not pacemaker. Even if a user wants to install pacemaker to dnl /usr/local or such, the OCF agents will be expected in their usual dnl location. However, we do give the user the option to override it. OCF_ROOT_DIR="/usr/lib/ocf" AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], [OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@/usr/lib/ocf@:>@])], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) dnl Deprecated options AC_ARG_WITH([pkg-name], [AS_HELP_STRING([--with-pkg-name=name], [deprecated and unused (will be removed in a future release)])], ) AC_ARG_WITH([pkgname], [AS_HELP_STRING([--with-pkgname=name], [deprecated and unused (will be removed in a future release)])], ) dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", [Current pacemaker version]) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. 
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) AC_PROG_LN_S AC_PROG_MKDIR_P if cc_supports_flag -Werror; then WERROR="-Werror" else WERROR="" fi # Normalize enable_fatal_warnings (defaulting to yes, when compiler supports it) if test "x${enable_fatal_warnings}" != "xno" ; then if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then enable_fatal_warnings=yes else AC_MSG_NOTICE(Compiler does not support fatal warnings) enable_fatal_warnings=no fi fi INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
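dnl To illustrate why the expansion below matters: autoconf leaves nested
dnl references in place, so without it a generated #define or Python script
dnl would carry the literal, unusable string rather than a real path:
dnl
dnl   sysconfdir='${prefix}/etc'                   # as left by autoconf
dnl   eval sysconfdir="`eval echo ${sysconfdir}`"  # -> e.g. "/etc" after the
dnl                                                #    prefix sanitizing above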
eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"

dnl Home-grown variables

if test "x${runstatedir}" = "x"; then
    if test "x${pcmk_runstatedir}" = "x"; then
        runstatedir="${localstatedir}/run"
    else
        runstatedir="${pcmk_runstatedir}"
    fi
fi
eval runstatedir="$(eval echo ${runstatedir})"
AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data])
AC_SUBST(runstatedir)

eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
    docdir=${datadir}/doc/${PACKAGE}-${VERSION}
fi
AC_SUBST(docdir)

if test x"${CONFIGDIR}" = x""; then
    CONFIGDIR="${sysconfdir}/sysconfig"
fi
AC_SUBST(CONFIGDIR)

if test x"${CRM_LOG_DIR}" = x""; then
    CRM_LOG_DIR="${localstatedir}/log/pacemaker"
fi
AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
AC_SUBST(CRM_LOG_DIR)

if test x"${CRM_BUNDLE_DIR}" = x""; then
    CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles"
fi
AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
AC_SUBST(CRM_BUNDLE_DIR)

if test x"${PCMK_GNUTLS_PRIORITIES}" = x""; then
    AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities])
fi
AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities])

if test x"${BUG_URL}" = x""; then
    BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"
fi
AC_SUBST(BUG_URL)

for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
    sharedstatedir localstatedir libdir includedir oldincludedir infodir \
    mandir INITDIR docdir CONFIGDIR
do
    dirname=`eval echo '${'${j}'}'`
    if test ! -d "$dirname"
    then
        AC_MSG_WARN([$j directory ($dirname) does not exist!])
    fi
done

us_auth=
AC_CHECK_HEADER([sys/socket.h], [
    AC_CHECK_DECL([SO_PEERCRED], [
        # Linux
        AC_CHECK_TYPE([struct ucred], [
            us_auth=peercred_ucred;
            AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1],
                      [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)])
        ], [
            # OpenBSD
            AC_CHECK_TYPE([struct sockpeercred], [
                us_auth=localpeercred_sockpeercred;
                AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1],
                          [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)])
            ], [], [[#include <sys/socket.h>]])
        ], [[#define _GNU_SOURCE
             #include <sys/socket.h>]])
    ], [], [[#include <sys/socket.h>]])
])

if test -z "${us_auth}"; then
    # FreeBSD
    AC_CHECK_DECL([getpeereid], [
        us_auth=getpeereid;
        AC_DEFINE([US_AUTH_GETPEEREID], [1],
                  [Define if Unix socket auth method is getpeereid(s, &uid, &gid)])
    ], [
        # Solaris/OpenIndiana
        AC_CHECK_DECL([getpeerucred], [
            us_auth=getpeerucred;
            AC_DEFINE([US_AUTH_GETPEERUCRED], [1],
                      [Define if Unix socket auth method is getpeerucred(s, &ucred)])
        ], [
            AC_MSG_ERROR([No way to authenticate a Unix socket peer])
        ], [[#include <ucred.h>]])
    ])
fi

dnl This OS-based decision-making is poor autotools practice;
dnl feature-based mechanisms are strongly preferred.
dnl
dnl So keep this section to a bare minimum; regard as a "necessary evil".
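dnl For comparison, the feature-based idiom preferred everywhere else in
dnl this file probes for the capability itself rather than inferring it
dnl from $host_os (the probes shown are ones this file actually uses):
dnl
dnl   AC_CHECK_HEADERS(sys/signalfd.h)     dnl does the header exist?
dnl   AC_CHECK_FUNCS([sched_setscheduler]) dnl does the function link?
dnl
dnl The few unavoidable OS-specific tweaks follow: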
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac # C99 doesn't guarantee uint64_t type and related format specifiers, but # prerequisites, corosync + libqb, use that widely, so the target platforms # are already pre-constrained to those "64bit-clean" (doesn't imply native # bit width) and hence we deliberately refrain from artificial surrogates # (sans manipulation through cached values). AC_CACHE_VAL( [pcmk_cv_decl_inttypes], [ AC_CHECK_DECLS( [PRIu64, PRIu32, PRIx32, SCNu64], [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"], [ # test shall only react on "no" cached result & error out respectively if test "x$ac_cv_have_decl_PRIu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)]) elif test "x$ac_cv_have_decl_PRIu32" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)]) elif test "x$ac_cv_have_decl_PRIx32" = xno; then AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)]) elif test "x$ac_cv_have_decl_SCNu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)]) fi ], [[#include ]] ) ] ) ( set $pcmk_cv_decl_inttypes AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T]) AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T]) AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T]) AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS]) ) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. 
To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path if test x"${PYTHON}" != x""; then AC_PATH_PROG([PYTHON], [$PYTHON]) fi case "x$PYTHON" in x*python3*|x*platform-python*) dnl When used with Python 3, Pacemaker requires a minimum of 3.2 AM_PATH_PYTHON([3.2]) ;; *) dnl Otherwise, Pacemaker requires a minimum of 2.7 AM_PATH_PYTHON([2.7]) ;; esac AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([PUBLICAN], [publican]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl BASH is already an environment variable, so use something else AC_PATH_PROG([BASH_PATH], [bash]) -PKG_PROG_PKG_CONFIG +PKG_PROG_PKG_CONFIG(0.18) # PKG_NOARCH_INSTALLDIR not available prior to pkg-config 0.27 and # pkgconf 0.8.10, respectively (next line is to mimic that scenario) dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) m4_ifndef([PKG_NOARCH_INSTALLDIR], [ AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) ]) ]) PKG_NOARCH_INSTALLDIR AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi dnl Bash is needed for building man pages and running regression tests if test x"${BASH_PATH}" = x""; then AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi publican_intree_brand=no if test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""; then dnl special handling for clusterlabs brand (possibly in-tree version used) test "${PUBLICAN_BRAND}" != "clusterlabs" \ || test -d /usr/share/publican/Common_Content/clusterlabs if test $? -ne 0; then dnl Unknown option: brand_dir vs. 
Option brand_dir requires an argument
        if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then
            AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common])
            PUBLICAN_BRAND=common
        else
            publican_intree_brand=yes
        fi
    fi
    AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand])
    PCMK_FEATURES="$PCMK_FEATURES publican-docs"
fi

AM_CONDITIONAL([BUILD_DOCBOOK], [test x"${PUBLICAN_BRAND}" != x"" \
    && test x"${PUBLICAN}" != x"" \
    && test x"${INKSCAPE}" != x""])
AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], [test x"${publican_intree_brand}" = x"yes"])
AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x""])

dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt
AC_MSG_CHECKING([for GNU-compatible getopt])
IFS_orig=$IFS
IFS=:
for PATH_DIR in $PATH; do
    IFS=$IFS_orig
    GETOPT_PATH="${PATH_DIR}/getopt"
    if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then
        $GETOPT_PATH -T >/dev/null 2>/dev/null
        if test $? -eq 4; then
            break
        fi
    fi
    GETOPT_PATH=""
done
IFS=$IFS_orig
if test -n "$GETOPT_PATH"; then
    AC_MSG_RESULT([$GETOPT_PATH])
else
    AC_MSG_RESULT([no])
    AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt)
fi
AC_SUBST([GETOPT_PATH])

dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl     is a dummy function which no system supplies.  It is here to make
dnl     the system compile semi-correctly on OpenBSD which doesn't know
dnl     how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl     System-V systems may have it, but hidden and/or deprecated.
dnl     A replacement function is supplied for it.
dnl
dnl setenv: is some bsdish function that should also be avoided (use
dnl     putenv instead)
dnl     On the other hand, putenv doesn't provide the right API for the
dnl     code and has memory leaks designed in (sigh...). Fortunately,
dnl     a replacement function is supplied for it.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl     A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl     We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl     We wrote a tolerably-fast replacement function for it.
AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup)

dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket)            dnl -lsocket
AC_CHECK_LIB(c, dlopen)                 dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen)                dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler)    dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long)    dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start)            dnl -lpam (if available)

AC_CHECK_FUNCS([sched_setscheduler])
if test "$ac_cv_func_sched_setscheduler" != yes; then
    PC_LIBS_RT=""
else
    PC_LIBS_RT="-lrt"
fi
AC_SUBST(PC_LIBS_RT)

AC_CHECK_LIB(uuid, uuid_parse)      dnl load the library if necessary
AC_CHECK_FUNCS(uuid_unparse)        dnl OSX ships uuid_* as standard functions
AC_CHECK_HEADERS(uuid/uuid.h)
if test "x$ac_cv_func_uuid_unparse" != xyes; then
    AC_MSG_ERROR(You do not have the libuuid development package installed)
fi

if test x"${PKG_CONFIG}" = x""; then
    AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE})
fi

# Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc.
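dnl PKG_CHECK_MODULES aborts configure by default when the requirement is
dnl not met; an equivalent manual probe (a sketch only, not what is used
dnl here) would be:
dnl
dnl   $PKG_CONFIG --exists 'glib-2.0 >= 2.16.0' \
dnl       || AC_MSG_ERROR([glib-2.0 >= 2.16.0 is required])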
PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0],
                  [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}"
                   LIBS="${LIBS} ${GLIB_LIBS}"])

#
# Where is dlopen?
#
if test "$ac_cv_lib_c_dlopen" = yes; then
    LIBADD_DL=""
elif test "$ac_cv_lib_dl_dlopen" = yes; then
    LIBADD_DL=-ldl
else
    LIBADD_DL=${lt_cv_dlopen_libs}
fi

dnl ========================================================================
dnl Headers
dnl ========================================================================

# Some distributions insert #warnings into deprecated headers. If we will
# enable fatal warnings for the build, then enable them for the header checks
# as well, otherwise the build could fail even though the header check
# succeeds. (We should probably be doing this in more places.)
if test "x${enable_fatal_warnings}" = xyes ; then
    cc_temp_flags "$CFLAGS $WERROR"
fi
AC_CHECK_HEADERS(arpa/inet.h)
AC_CHECK_HEADERS(ctype.h)
AC_CHECK_HEADERS(dirent.h)
AC_CHECK_HEADERS(errno.h)
AC_CHECK_HEADERS(getopt.h)
AC_CHECK_HEADERS(glib.h)
AC_CHECK_HEADERS(grp.h)
AC_CHECK_HEADERS(limits.h)
AC_CHECK_HEADERS(linux/swab.h)
AC_CHECK_HEADERS(malloc.h)
AC_CHECK_HEADERS(netdb.h)
AC_CHECK_HEADERS(netinet/in.h)
AC_CHECK_HEADERS(netinet/ip.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(sgtty.h)
AC_CHECK_HEADERS(signal.h)
AC_CHECK_HEADERS(stdarg.h)
AC_CHECK_HEADERS(stddef.h)
AC_CHECK_HEADERS(stdio.h)
AC_CHECK_HEADERS(stdlib.h)
AC_CHECK_HEADERS(string.h)
AC_CHECK_HEADERS(strings.h)
AC_CHECK_HEADERS(sys/dir.h)
AC_CHECK_HEADERS(sys/ioctl.h)
AC_CHECK_HEADERS(sys/param.h)
AC_CHECK_HEADERS(sys/reboot.h)
AC_CHECK_HEADERS(sys/resource.h)
AC_CHECK_HEADERS(sys/socket.h)
AC_CHECK_HEADERS(sys/signalfd.h)
AC_CHECK_HEADERS(sys/sockio.h)
AC_CHECK_HEADERS(sys/stat.h)
AC_CHECK_HEADERS(sys/time.h)
AC_CHECK_HEADERS(sys/types.h)
AC_CHECK_HEADERS(sys/utsname.h)
AC_CHECK_HEADERS(sys/wait.h)
AC_CHECK_HEADERS(time.h)
AC_CHECK_HEADERS(unistd.h)
if test "x${enable_fatal_warnings}" = xyes ; then
    cc_restore_flags
fi

dnl These headers need prerequisites before the tests will pass
dnl AC_CHECK_HEADERS(net/if.h)

PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
                  [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
                   LIBS="${LIBS} ${LIBXML2_LIBS}"])

AC_CHECK_HEADERS(libxml/xpath.h)
if test "$ac_cv_header_libxml_xpath_h" != "yes"; then
    AC_MSG_ERROR(libxml development headers not found)
fi

AC_CHECK_LIB(xslt, xsltApplyStylesheet, [],
             AC_MSG_ERROR(Unsupported libxslt library version))
AC_CHECK_HEADERS(libxslt/xslt.h)
if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
    AC_MSG_ERROR(libxslt development headers not found)
fi

AC_CACHE_CHECK(whether __progname and __progname_full are available,
               pf_cv_var_progname,
               AC_TRY_LINK([extern char *__progname, *__progname_full;],
                           [__progname = "foo"; __progname_full = "foo bar";],
                           pf_cv_var_progname="yes", pf_cv_var_progname="no"))
if test "$pf_cv_var_progname" = "yes"; then
    AC_DEFINE(HAVE___PROGNAME,1,[ ])
fi

dnl ========================================================================
dnl Generic declarations
dnl ========================================================================

AC_CHECK_DECLS([CLOCK_MONOTONIC], [], [], [[#include <time.h>]])
-# the above alone will allow using clock_gettime(CLOCK_MONOTONIC, ...),
-# but in case there are any discrepancies found with the move onto that
-# where ftime(3) was originally used -- the callsites make do without
-# any such timestamp grabbing at this time -- so to revert that effect
-# (trigger such omission), line below can be uncommented for that intent
-# (alternatively, propagate equivalent variable assignment from outside)
-#CPPFLAGS="-DPCMK_TIME_EMERGENCY_CGT $CPPFLAGS"
-

dnl ========================================================================
dnl Structures
dnl ========================================================================

AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])

AC_CHECK_MEMBER([struct dirent.d_type],
    AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
    [#include <dirent.h>])

dnl ========================================================================
dnl Functions
dnl ========================================================================

AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function]))
AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function]))

AC_CACHE_CHECK(whether sscanf supports %m,
               pf_cv_var_sscanf,
               AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <stdio.h>
const char *s = "some-command-line-arg";
int main(int argc, char **argv)
{
    char *name = NULL;
    int n = sscanf(s, "%ms", &name);
    return n == 1 ? 0 : 1;
}
]])],
                             pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no"))

if test "$pf_cv_var_sscanf" = "yes"; then
    AC_DEFINE(SSCANF_HAS_M, 1, [ ])
fi

dnl ========================================================================
dnl bzip2
dnl ========================================================================
AC_CHECK_HEADERS(bzlib.h)
AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress)

if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then
    AC_MSG_ERROR(BZ2 libraries not found)
fi

if test x$ac_cv_header_bzlib_h != xyes; then
    AC_MSG_ERROR(BZ2 Development headers not found)
fi

dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================

AC_MSG_CHECKING([for sighandler_t])
AC_TRY_COMPILE([#include <signal.h>], [sighandler_t *f;],
               has_sighandler_t=yes, has_sighandler_t=no)
AC_MSG_RESULT($has_sighandler_t)
if test "$has_sighandler_t" = "yes" ; then
    AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] )
fi

dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS(curses.h)
AC_CHECK_HEADERS(curses/curses.h)
AC_CHECK_HEADERS(ncurses.h)
AC_CHECK_HEADERS(ncurses/ncurses.h)

dnl Although the n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
PC_NAME_CURSES=""
PC_LIBS_CURSES=""
if test "$ac_cv_header_ncurses_h" = "yes"; then
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
    PC_NAME_CURSES="ncurses"
fi

if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
    PC_NAME_CURSES="ncurses"
fi

dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" cc_temp_flags "-Wcast-qual $WERROR" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [pcmk_cv_compatible_printw=yes], [pcmk_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS cc_restore_flags AC_MSG_RESULT([$pcmk_cv_compatible_printw]) if test "$pcmk_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== if test x${enable_no_stack} = xyes; then SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 0.14.0+ (2012-06) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) 
PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" dnl libqb 0.17.0+ (2014-02) AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) dnl libqb 2.0.0+ (2020-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS(stonith/stonith.h) if test "$ac_cv_header_stonith_stonith_h" = "yes"; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) if test x"${CRM_DAEMON_USER}" = x""; then CRM_DAEMON_USER="hacluster" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) if test x"${CRM_DAEMON_GROUP}" = x""; then CRM_DAEMON_GROUP="haclient" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) 
RH_STONITH_DIR="$sbindir"
AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents)
AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries)

RH_STONITH_PREFIX="fence_"
AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents)

AC_PATH_PROGS(GIT, git false)

AC_MSG_CHECKING(build version)
BUILD_VERSION=$Format:%h$
if test $BUILD_VERSION != ":%h$"; then
    AC_MSG_RESULT(archive hash: $BUILD_VERSION)
elif test -x $GIT -a -d .git; then
    BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
    AC_MSG_RESULT(git hash: $BUILD_VERSION)
else
    # The current directory name makes a reasonable default
    # Most generated archives will include the hash or tag
    BASE=`basename $PWD`
    BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
    AC_MSG_RESULT(directory based hash: $BUILD_VERSION)
fi
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)

HAVE_dbus=1
PKG_CHECK_MODULES([DBUS], [dbus-1],
                  [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
                  [HAVE_dbus=0])
AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
if test $HAVE_dbus = 0; then
    PC_NAME_DBUS=""
else
    PC_NAME_DBUS="dbus-1"
fi
AC_SUBST(PC_NAME_DBUS)

if test "x${enable_systemd}" != xno; then
    if test $HAVE_dbus = 0; then
        if test "x${enable_systemd}" = xyes; then
            AC_MSG_FAILURE([cannot enable systemd without DBus])
        else
            enable_systemd=no
        fi
    fi
    if echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT \
        || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then
        if test "x${enable_systemd}" = xyes; then
            AC_MSG_FAILURE([cannot enable systemd without clock_gettime(CLOCK_MONOTONIC, ...)])
        else
            enable_systemd=no
        fi
    fi
    if test "x${enable_systemd}" = xtry; then
        AC_MSG_CHECKING([for systemd version query result via dbus-send])
        ret=$({ dbus-send --system --print-reply \
                    --dest=org.freedesktop.systemd1 \
                    /org/freedesktop/systemd1 \
                    org.freedesktop.DBus.Properties.Get \
                    string:org.freedesktop.systemd1.Manager \
                    string:Version 2>/dev/null \
                || echo "this borked"; } | tail -n1)
        # sanitize output a bit (interested just in value, not type),
        # ret is intentionally unquoted so as to normalize whitespace
        ret=$(echo ${ret} | cut -d' ' -f2-)
        AC_MSG_RESULT([${ret}])
        if test "x${ret}" != xborked \
            || systemctl --version 2>/dev/null | grep -q systemd; then
            enable_systemd=yes
        else
            enable_systemd=no
        fi
    fi
fi
AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
AC_MSG_RESULT([${enable_systemd}])

HAVE_systemd=0
if test "x${enable_systemd}" = xyes; then
    HAVE_systemd=1
    PCMK_FEATURES="$PCMK_FEATURES systemd"
    AC_MSG_CHECKING([which system unit file directory to use])
    PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir])
    AC_MSG_RESULT([${systemdsystemunitdir}])
    if test "x${systemdsystemunitdir}" = x""; then
        AC_MSG_FAILURE([cannot enable systemd when systemdsystemunitdir unresolved])
    fi
fi
AC_SUBST([systemdsystemunitdir])
AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services)
AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1)
AC_SUBST(SUPPORT_SYSTEMD)

if test "x${enable_upstart}" != xno; then
    if test $HAVE_dbus = 0; then
        if test "x${enable_upstart}" = xyes; then
            AC_MSG_FAILURE([cannot enable Upstart without DBus])
        else
            enable_upstart=no
        fi
    fi
    if test "x${enable_upstart}" = xtry; then
        AC_MSG_CHECKING([for Upstart version query result via dbus-send])
        ret=$({ dbus-send --system --print-reply \
--dest=com.ubuntu.Upstart \
                    /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
                    string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
                || echo "this borked"; } | tail -n1)
        # sanitize output a bit (interested just in value, not type),
        # ret is intentionally unquoted so as to normalize whitespace
        ret=$(echo ${ret} | cut -d' ' -f2-)
        AC_MSG_RESULT([${ret}])
        if test "x${ret}" != xborked \
            || initctl --version 2>/dev/null | grep -q upstart; then
            enable_upstart=yes
        else
            enable_upstart=no
        fi
    fi
fi
AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
AC_MSG_RESULT([${enable_upstart}])

HAVE_upstart=0
if test "x${enable_upstart}" = xyes; then
    HAVE_upstart=1
    PCMK_FEATURES="$PCMK_FEATURES upstart"
fi
AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services)
AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1)
AC_SUBST(SUPPORT_UPSTART)

case $SUPPORT_NAGIOS in
    1|yes|true)
        if echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT \
            || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then
            AC_MSG_FAILURE([cannot enable nagios without clock_gettime(CLOCK_MONOTONIC, ...)])
        fi
        SUPPORT_NAGIOS=1
        ;;
    try)
        if echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT \
            || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then
            SUPPORT_NAGIOS=0
        else
            SUPPORT_NAGIOS=1
        fi
        ;;
    *)
        SUPPORT_NAGIOS=0
        ;;
esac

if test $SUPPORT_NAGIOS = 1; then
    PCMK_FEATURES="$PCMK_FEATURES nagios"
fi
AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins)
AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1)

if test x"$NAGIOS_PLUGIN_DIR" = x""; then
    NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
fi
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
AC_SUBST(NAGIOS_PLUGIN_DIR)

if test x"$NAGIOS_METADATA_DIR" = x""; then
    NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"
fi
AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
AC_SUBST(NAGIOS_METADATA_DIR)

STACKS=""
CLUSTERLIBS=""
PC_NAME_CLUSTER=""

dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================

dnl Normalize the values
case $SUPPORT_CS in
    1|yes|true)
        SUPPORT_CS=yes
        missingisfatal=1
        ;;
    try)
        missingisfatal=0
        ;;
    *)
        SUPPORT_CS=no
        ;;
esac

AC_MSG_CHECKING(for native corosync)
COROSYNC_LIBS=""
if test $SUPPORT_CS = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_CS=0
else
    AC_MSG_RESULT($SUPPORT_CS)
    SUPPORT_CS=1
    PKG_CHECK_MODULES(cpg, libcpg)          dnl Fatal
    PKG_CHECK_MODULES(cfg, libcfg)          dnl Fatal
    PKG_CHECK_MODULES(cmap, libcmap)        dnl Fatal
    PKG_CHECK_MODULES(quorum, libquorum)    dnl Fatal
    PKG_CHECK_MODULES(libcorosync_common, libcorosync_common)  dnl Fatal
    CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS"
    COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS"
    CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
    PC_NAME_CLUSTER="$PC_NAME_CLUSTER libcfg libcmap libcorosync_common libcpg libquorum"
    STACKS="$STACKS corosync-native"
fi
AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer)
AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1)
AC_SUBST(SUPPORT_COROSYNC)

dnl
dnl Cluster stack - Sanity
dnl

if test x${enable_no_stack} = xyes; then
    AC_MSG_NOTICE(No cluster stack supported, building only the scheduler)
no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_ACL=no ;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) SUPPORT_ACL=1 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1 ;; *) SUPPORT_CIBSECRETS=0 ;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) if test $SUPPORT_CIBSECRETS = 1; then PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) fi dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29) AC_CHECK_LIB(gnutls, gnutls_priority_set_direct) if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_FUNCS([gnutls_sec_param_to_pk_bits]) dnl since 2.12.0 (2011-03-24) if test "$ac_cv_header_gnutls_gnutls_h" != "yes"; then PC_NAME_GNUTLS="" else PC_NAME_GNUTLS="gnutls" fi AC_SUBST(PC_NAME_GNUTLS) fi dnl ======================================================================== dnl PAM dnl ======================================================================== AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIMPI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG then 
PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
    OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")

dnl ========================================================================
dnl Compiler flags
dnl ========================================================================

dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to sub processes.
if export | fgrep " CFLAGS=" > /dev/null; then
    SAVED_CFLAGS="$CFLAGS"
    unset CFLAGS
    CFLAGS="$SAVED_CFLAGS"
    unset SAVED_CFLAGS
fi

AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])

CC_EXTRAS=""

if test "$GCC" != yes; then
    CFLAGS="$CFLAGS -g"
else
    CFLAGS="$CFLAGS -ggdb"

dnl When we don't have diagnostic push / pull, we can't explicitly disable
dnl checking for nonliteral formats in the places where they occur on purpose,
dnl thus we disable nonliteral format checking globally as we are aborting
dnl on warnings.
dnl What makes things really ugly is that nonliteral format checking is
dnl available as an extra switch in very modern gcc, but for older gcc it is
dnl part of -Wformat=2.
dnl So if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral;
dnl if we don't have push/pull but do have -Wformat-nonliteral, we can enable
dnl -Wformat=2; otherwise neither.

    gcc_diagnostic_push_pull=no
    cc_temp_flags "$CFLAGS $WERROR"
    AC_MSG_CHECKING([for gcc diagnostic push / pull])
    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#pragma GCC diagnostic push
#pragma GCC diagnostic pop
                      ]])],
                      [
                        AC_MSG_RESULT([yes])
                        gcc_diagnostic_push_pull=yes
                      ], AC_MSG_RESULT([no]))
    cc_restore_flags

    if cc_supports_flag "-Wformat-nonliteral"; then
        gcc_format_nonliteral=yes
    else
        gcc_format_nonliteral=no
    fi

    # We had to eliminate -Wnested-externs because of libtool changes
    # Make sure to order options so that the former stand for prerequisites
    # of the latter (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline
        -Wall
        -Waggregate-return
        -Wbad-function-cast
        -Wcast-align
        -Wdeclaration-after-statement
        -Wendif-labels
        -Wfloat-equal
        -Wformat-security
        -Wmissing-prototypes
        -Wmissing-declarations
        -Wnested-externs
        -Wno-long-long
        -Wno-strict-aliasing
        -Wpointer-arith
        -Wstrict-prototypes
        -Wwrite-strings
        -Wunused-but-set-variable
        -Wunsigned-char"

    if test "x$gcc_diagnostic_push_pull" = "xyes"; then
        AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [],
                  [gcc can complain about nonliterals in format])
        EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral"
    else
        if test "x$gcc_format_nonliteral" = "xyes"; then
            EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"
        fi
    fi

    # Additional warnings it might be nice to enable one day
    #        -Wshadow
    #        -Wunreachable-code

    for j in $EXTRA_FLAGS
    do
        if cc_supports_flag $CC_EXTRAS $j
        then
            CC_EXTRAS="$CC_EXTRAS $j"
        fi
    done

    if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then
        AC_MSG_NOTICE(Enabling ANSI Compatibility)
        CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
    fi

    AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi

dnl
dnl Hardening flags
dnl
dnl The prime control of whether to apply (targeted) hardening build flags and
dnl which ones is --{enable,disable}-hardening option passed to ./configure:
dnl
dnl --enable-hardening=try (default):
dnl     depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
dnl     CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables
dnl     (see below) is set and non-null, all these custom flags (even if not
dnl     set) are used as are, otherwise the best effort is made to offer
dnl     reasonably strong hardening in several categories (RELRO, PIE,
dnl     "bind now", stack protector) according to what the selected toolchain
dnl     can offer
dnl
dnl --enable-hardening:
dnl     same effect as --enable-hardening=try when the environment variables
dnl     in question are suppressed
dnl
dnl --disable-hardening:
dnl     do not apply any targeted hardening measures at all
dnl
dnl The user-injected environment variables that regulate the hardening in
dnl default case are as follows:
dnl
dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
dnl     compiler and linker flags (respectively) for daemon programs
dnl     (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd,
dnl     cib, stonithd, pacemaker-remoted, pacemaker-schedulerd)
dnl
dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
dnl     compiler and linker flags (respectively) for libraries linked
dnl     with the daemon programs
dnl
dnl Note that these are purposely targeted variables (addressing particular
dnl targets all over the scattered Makefiles) and have no effect outside of
dnl the intended scope (e.g., CLI utilities). For a global reach,
dnl use CFLAGS, LDFLAGS, etc. as usual.
dnl
dnl For guidance on the suitable flags consult, for instance:
dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
dnl

if test "x${HARDENING}" != "xtry"; then
    unset CFLAGS_HARDENED_EXE
    unset CFLAGS_HARDENED_LIB
    unset LDFLAGS_HARDENED_EXE
    unset LDFLAGS_HARDENED_LIB
fi
if test "x${HARDENING}" = "xno"; then
    AC_MSG_NOTICE([Hardening: explicitly disabled])
elif test "x${HARDENING}" = "xyes" \
    || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then
    dnl We'll figure out on our own...
    CFLAGS_HARDENED_EXE=
    CFLAGS_HARDENED_LIB=
    LDFLAGS_HARDENED_EXE=
    LDFLAGS_HARDENED_LIB=
    relro=0
    pie=0
    bindnow=0
    stackprot=0
    # daemons incl.
libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1]) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1]) fi # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1]) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
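dnl As a concrete illustration of the problem: AC_CHECK_FUNCS links tiny
dnl generated programs along these lines (simplified from autoconf's actual
dnl conftest):
dnl
dnl   /* does nanosleep() link? */
dnl   char nanosleep ();
dnl   int main () { return nanosleep (); }
dnl
dnl which routinely trips warnings that a -Werror build would turn into
dnl errors, making the probe report "no" for a function that actually exists.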
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS $WERROR" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant fi AC_MSG_RESULT(Suppress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py]) AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy]) AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py]) AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test]) AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts]) AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli]) AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage]) AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec]) AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing]) AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher]) AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression]) AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler]) AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support]) AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh]) AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench]) AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) AC_CONFIG_FILES([extra/resources/SysInfo], [chmod +x extra/resources/SysInfo]) AC_CONFIG_FILES([extra/resources/ifspeed], [chmod +x extra/resources/ifspeed]) AC_CONFIG_FILES([extra/resources/o2cb], [chmod +x extra/resources/o2cb]) AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount]) AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master]) AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report]) AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby]) AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret]) AC_CONFIG_FILES([tools/pcmk_simtimes], [chmod +x tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ 
daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/Clusters_from_Scratch/publican.cfg \ doc/Pacemaker_Administration/publican.cfg \ doc/Pacemaker_Development/publican.cfg \ doc/Pacemaker_Explained/publican.cfg \ doc/Pacemaker_Remote/publican.cfg \ doc/sphinx/Makefile \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/rules/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tests/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_RESULT([ Unix socket auth method = ${us_auth}]) diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c index e3e26ef1ff..d02244f649 100644 --- a/daemons/execd/execd_commands.c +++ b/daemons/execd/execd_commands.c @@ -1,1910 +1,1909 @@ /* * Copyright 2012-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include // Check whether we have a high-resolution monotonic clock #undef PCMK__TIME_USE_CGT -#if HAVE_DECL_CLOCK_MONOTONIC && defined(CLOCK_MONOTONIC) \ - && !defined(PCMK_TIME_EMERGENCY_CGT) +#if HAVE_DECL_CLOCK_MONOTONIC && defined(CLOCK_MONOTONIC) # define PCMK__TIME_USE_CGT # include /* clock_gettime */ #endif #include #include #include #include #include #include #include #include "pacemaker-execd.h" #define EXIT_REASON_MAX_LEN 128 GHashTable *rsc_list = NULL; typedef struct lrmd_cmd_s { int timeout; guint interval_ms; int start_delay; int timeout_orig; int call_id; int exec_rc; int lrmd_op_status; int call_opts; /* Timer ids, must be removed on cmd destruction. */ int delay_id; int stonith_recurring_id; int rsc_deleted; int service_flags; char *client_id; char *origin; char *rsc_id; char *action; char *real_action; char *exit_reason; char *output; char *userdata_str; /* We can track operation queue time and run time, to be saved with the CIB * resource history (and displayed in cluster status). We need * high-resolution monotonic time for this purpose, so we use * clock_gettime(CLOCK_MONOTONIC, ...) (if available, otherwise this feature * is disabled). * * However, we also need epoch timestamps for recording the time the command * last ran and the time its return value last changed, for use in time * displays (as opposed to interval calculations). We keep time_t values for * this purpose. * * The last run time is used for both purposes, so we keep redundant * monotonic and epoch values for this. Technically the two could represent * different times, but since time_t has only second resolution and the * values are used for distinct purposes, that is not significant. */ #ifdef PCMK__TIME_USE_CGT /* Recurring and systemd operations may involve more than one executor * command per operation, so they need info about the original and the most * recent. */ struct timespec t_first_run; // When op first ran struct timespec t_run; // When op most recently ran struct timespec t_first_queue; // When op was first queued struct timespec t_queue; // When op was most recently queued #endif time_t epoch_last_run; // Epoch timestamp of when op last ran time_t epoch_rcchange; // Epoch timestamp of when rc last changed int first_notify_sent; int last_notify_rc; int last_notify_op_status; int last_pid; GHashTable *params; } lrmd_cmd_t; static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc); static gboolean lrmd_rsc_dispatch(gpointer user_data); static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id); #ifdef PCMK__TIME_USE_CGT /*! * \internal * \brief Check whether a struct timespec has been set * * \param[in] timespec Time to check * * \return true if timespec has been set (i.e. is nonzero), false otherwise */ static inline bool time_is_set(struct timespec *timespec) { return (timespec != NULL) && ((timespec->tv_sec != 0) || (timespec->tv_nsec != 0)); } /* * \internal * \brief Set a timespec (and its original if unset) to the current time * * \param[out] t_current Where to store current time * \param[out] t_orig Where to copy t_current if unset */ static void get_current_time(struct timespec *t_current, struct timespec *t_orig) { clock_gettime(CLOCK_MONOTONIC, t_current); if ((t_orig != NULL) && !time_is_set(t_orig)) { *t_orig = *t_current; } } /*! 
 * \internal
 * \brief Return difference between two times in milliseconds
 *
 * \param[in] now  More recent time (or NULL to use current time)
 * \param[in] old  Earlier time
 *
 * \return milliseconds difference (or 0 if old is NULL or unset)
 *
 * \note Can overflow on 32bit machines when the difference is around
 *       24 days or more.
 */
static int
time_diff_ms(struct timespec *now, struct timespec *old)
{
    int diff_ms = 0;

    if (time_is_set(old)) {
        struct timespec local_now = { 0, };

        if (now == NULL) {
            clock_gettime(CLOCK_MONOTONIC, &local_now);
            now = &local_now;
        }
        diff_ms = (now->tv_sec - old->tv_sec) * 1000
                  + (now->tv_nsec - old->tv_nsec) / 1000000;
    }
    return diff_ms;
}

/*!
 * \internal
 * \brief Reset a command's operation times to their original values.
 *
 * Reset a command's run and queued timestamps to the timestamps of the
 * original command, so we report the entire time since then and not just the
 * time since the most recent command (for recurring and systemd operations).
 *
 * \param[in] cmd  Executor command object to reset
 *
 * \note It's not obvious what the queued time should be for a systemd
 *       start/stop operation, which might go like this:
 *         initial command queued 5ms, runs 3s
 *         monitor command queued 10ms, runs 10s
 *         monitor command queued 10ms, runs 10s
 *       Is the queued time for that operation 5ms, 10ms or 25ms? The current
 *       implementation will report 5ms. If it's 25ms, then we need to
 *       subtract 20ms from the total exec time so as not to count it twice.
 *       We can implement that later if it matters to anyone ...
 */
static void
cmd_original_times(lrmd_cmd_t * cmd)
{
    cmd->t_run = cmd->t_first_run;
    cmd->t_queue = cmd->t_first_queue;
}
#endif

static void
log_finished(lrmd_cmd_t * cmd, int exec_time, int queue_time)
{
    char pid_str[32] = { 0, };
    int log_level = LOG_INFO;

    if (cmd->last_pid) {
        snprintf(pid_str, 32, "%d", cmd->last_pid);
    }
    if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
        log_level = LOG_DEBUG;
    }
#ifdef PCMK__TIME_USE_CGT
    do_crm_log(log_level,
               "%s %s (call %d%s%s) exited with status %d"
               " (execution time %dms, queue time %dms)",
               cmd->rsc_id, cmd->action, cmd->call_id,
               (cmd->last_pid? ", PID " : ""), pid_str,
               cmd->exec_rc, exec_time, queue_time);
#else
    do_crm_log(log_level,
               "%s %s (call %d%s%s) exited with status %d",
               cmd->rsc_id, cmd->action, cmd->call_id,
               (cmd->last_pid?
", PID " : ""), pid_str, cmd->exec_rc); #endif } static void log_execute(lrmd_cmd_t * cmd) { int log_level = LOG_INFO; if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { log_level = LOG_DEBUG; } do_crm_log(log_level, "executing - rsc:%s action:%s call_id:%d", cmd->rsc_id, cmd->action, cmd->call_id); } static const char * normalize_action_name(lrmd_rsc_t * rsc, const char *action) { if (pcmk__str_eq(action, "monitor", pcmk__str_casei) && pcmk_is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) { return "status"; } return action; } static lrmd_rsc_t * build_rsc_from_xml(xmlNode * msg) { xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); lrmd_rsc_t *rsc = NULL; rsc = calloc(1, sizeof(lrmd_rsc_t)); crm_element_value_int(msg, F_LRMD_CALLOPTS, &rsc->call_opts); rsc->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); rsc->class = crm_element_value_copy(rsc_xml, F_LRMD_CLASS); rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER); rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE); rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_rsc_dispatch, rsc); rsc->st_probe_rc = -ENODEV; // if stonith, initialize to "not running" return rsc; } static lrmd_cmd_t * create_lrmd_cmd(xmlNode *msg, pcmk__client_t *client) { int call_options = 0; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); lrmd_cmd_t *cmd = NULL; cmd = calloc(1, sizeof(lrmd_cmd_t)); crm_element_value_int(msg, F_LRMD_CALLOPTS, &call_options); cmd->call_opts = call_options; cmd->client_id = strdup(client->id); crm_element_value_int(msg, F_LRMD_CALLID, &cmd->call_id); crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &cmd->interval_ms); crm_element_value_int(rsc_xml, F_LRMD_TIMEOUT, &cmd->timeout); crm_element_value_int(rsc_xml, F_LRMD_RSC_START_DELAY, &cmd->start_delay); cmd->timeout_orig = cmd->timeout; cmd->origin = crm_element_value_copy(rsc_xml, F_LRMD_ORIGIN); cmd->action = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ACTION); cmd->userdata_str = crm_element_value_copy(rsc_xml, F_LRMD_RSC_USERDATA_STR); cmd->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); cmd->params = xml2list(rsc_xml); if (pcmk__str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), "block", pcmk__str_casei)) { crm_debug("Setting flag to leave pid group on timeout and " "only kill action pid for " PCMK__OP_FMT, cmd->rsc_id, cmd->action, cmd->interval_ms); cmd->service_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Action", cmd->action, 0, SVC_ACTION_LEAVE_GROUP, "SVC_ACTION_LEAVE_GROUP"); } return cmd; } static void stop_recurring_timer(lrmd_cmd_t *cmd) { if (cmd) { if (cmd->stonith_recurring_id) { g_source_remove(cmd->stonith_recurring_id); } cmd->stonith_recurring_id = 0; } } static void free_lrmd_cmd(lrmd_cmd_t * cmd) { stop_recurring_timer(cmd); if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->params) { g_hash_table_destroy(cmd->params); } free(cmd->origin); free(cmd->action); free(cmd->real_action); free(cmd->userdata_str); free(cmd->rsc_id); free(cmd->output); free(cmd->exit_reason); free(cmd->client_id); free(cmd); } static gboolean stonith_recurring_op_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc; cmd->stonith_recurring_id = 0; if (!cmd->rsc_id) { return FALSE; } rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); CRM_ASSERT(rsc != NULL); /* take it out of recurring_ops list, and put it in the pending ops * to be executed */ rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = 
g_list_append(rsc->pending_ops, cmd); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_queue), &(cmd->t_first_queue)); #endif mainloop_set_trigger(rsc->work); return FALSE; } static inline void start_recurring_timer(lrmd_cmd_t *cmd) { if (cmd && (cmd->interval_ms > 0)) { cmd->stonith_recurring_id = g_timeout_add(cmd->interval_ms, stonith_recurring_op_helper, cmd); } } static gboolean start_delay_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc = NULL; cmd->delay_id = 0; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc) { mainloop_set_trigger(rsc->work); } return FALSE; } static gboolean merge_recurring_duplicate(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { GListPtr gIter = NULL; lrmd_cmd_t * dup = NULL; gboolean dup_pending = FALSE; if (cmd->interval_ms == 0) { return 0; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { dup = gIter->data; if (pcmk__str_eq(cmd->action, dup->action, pcmk__str_casei) && (cmd->interval_ms == dup->interval_ms)) { dup_pending = TRUE; goto merge_dup; } } /* if dup is in recurring_ops list, that means it has already executed * and is in the interval loop. we can't just remove it in this case. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { dup = gIter->data; if (pcmk__str_eq(cmd->action, dup->action, pcmk__str_casei) && (cmd->interval_ms == dup->interval_ms)) { goto merge_dup; } } return FALSE; merge_dup: /* This should not occur. If it does, we need to investigate how something * like this is possible in the controller. */ crm_warn("Duplicate recurring op entry detected (" PCMK__OP_FMT "), merging with previous op entry", rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval_ms); /* merge */ dup->first_notify_sent = 0; free(dup->userdata_str); dup->userdata_str = cmd->userdata_str; cmd->userdata_str = NULL; dup->call_id = cmd->call_id; if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { /* if we are waiting for the next interval, kick it off now */ if (dup_pending == TRUE) { stop_recurring_timer(cmd); stonith_recurring_op_helper(cmd); } } else if (dup_pending == FALSE) { /* if we've already handed this to the service lib, kick off an early execution */ services_action_kick(rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval_ms); } free_lrmd_cmd(cmd); return TRUE; } static void schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { gboolean dup_processed = FALSE; CRM_CHECK(cmd != NULL, return); CRM_CHECK(rsc != NULL, return); crm_trace("Scheduling %s on %s", cmd->action, rsc->rsc_id); dup_processed = merge_recurring_duplicate(rsc, cmd); if (dup_processed) { /* duplicate recurring cmd found, cmds merged */ return; } /* The controller expects the executor to automatically cancel * recurring operations before a resource stops. 
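     * (A NULL client_id to cancel_all_recurring() means recurring operations
     * from all clients are cancelled, not just one particular client's.)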
*/ if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { cancel_all_recurring(rsc, NULL); } rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_queue), &(cmd->t_first_queue)); #endif mainloop_set_trigger(rsc->work); if (cmd->start_delay) { cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } } static xmlNode * create_lrmd_reply(const char *origin, int rc, int call_id) { xmlNode *reply = create_xml_node(NULL, T_LRMD_REPLY); crm_xml_add(reply, F_LRMD_ORIGIN, origin); crm_xml_add_int(reply, F_LRMD_RC, rc); crm_xml_add_int(reply, F_LRMD_CALLID, call_id); return reply; } static void send_client_notify(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; pcmk__client_t *client = value; int rc; int log_level = LOG_WARNING; const char *msg = NULL; CRM_CHECK(client != NULL, return); if (client->name == NULL) { crm_trace("Skipping notification to client without name"); return; } if (pcmk_is_set(client->flags, pcmk__client_to_proxy)) { /* We only want to notify clients of the executor IPC API. If we are * running as Pacemaker Remote, we may have clients proxied to other * IPC services in the cluster, so skip those. */ crm_trace("Skipping executor API notification to %s IPC client", client->name); return; } rc = lrmd_server_send_notify(client, update_msg); if (rc == pcmk_rc_ok) { return; } switch (rc) { case ENOTCONN: case EPIPE: // Client exited without waiting for notification log_level = LOG_INFO; msg = "Disconnected"; break; default: msg = pcmk_rc_str(rc); break; } do_crm_log(log_level, "Could not notify client %s/%s: %s " CRM_XS " rc=%d", client->name, client->id, msg, rc); } static void send_cmd_complete_notify(lrmd_cmd_t * cmd) { xmlNode *notify = NULL; #ifdef PCMK__TIME_USE_CGT int exec_time = time_diff_ms(NULL, &(cmd->t_run)); int queue_time = time_diff_ms(&cmd->t_run, &(cmd->t_queue)); log_finished(cmd, exec_time, queue_time); #else log_finished(cmd, 0, 0); #endif /* if the first notify result for a cmd has already been sent earlier, and the * the option to only send notifies on result changes is set. Check to see * if the last result is the same as the new one. 
If so, suppress this update */ if (cmd->first_notify_sent && (cmd->call_opts & lrmd_opt_notify_changes_only)) { if (cmd->last_notify_rc == cmd->exec_rc && cmd->last_notify_op_status == cmd->lrmd_op_status) { /* only send changes */ return; } } cmd->first_notify_sent = 1; cmd->last_notify_rc = cmd->exec_rc; cmd->last_notify_op_status = cmd->lrmd_op_status; notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __func__); crm_xml_add_int(notify, F_LRMD_TIMEOUT, cmd->timeout); crm_xml_add_ms(notify, F_LRMD_RSC_INTERVAL, cmd->interval_ms); crm_xml_add_int(notify, F_LRMD_RSC_START_DELAY, cmd->start_delay); crm_xml_add_int(notify, F_LRMD_EXEC_RC, cmd->exec_rc); crm_xml_add_int(notify, F_LRMD_OP_STATUS, cmd->lrmd_op_status); crm_xml_add_int(notify, F_LRMD_CALLID, cmd->call_id); crm_xml_add_int(notify, F_LRMD_RSC_DELETED, cmd->rsc_deleted); crm_xml_add_ll(notify, F_LRMD_RSC_RUN_TIME, (long long) cmd->epoch_last_run); crm_xml_add_ll(notify, F_LRMD_RSC_RCCHANGE_TIME, (long long) cmd->epoch_rcchange); #ifdef PCMK__TIME_USE_CGT crm_xml_add_int(notify, F_LRMD_RSC_EXEC_TIME, exec_time); crm_xml_add_int(notify, F_LRMD_RSC_QUEUE_TIME, queue_time); #endif crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_RSC_EXEC); crm_xml_add(notify, F_LRMD_RSC_ID, cmd->rsc_id); if(cmd->real_action) { crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->real_action); } else { crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->action); } crm_xml_add(notify, F_LRMD_RSC_USERDATA_STR, cmd->userdata_str); crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->output); crm_xml_add(notify, F_LRMD_RSC_EXIT_REASON, cmd->exit_reason); if (cmd->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; xmlNode *args = create_xml_node(notify, XML_TAG_ATTRS); g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { hash2smartfield((gpointer) key, (gpointer) value, args); } } if (cmd->client_id && (cmd->call_opts & lrmd_opt_notify_orig_only)) { pcmk__client_t *client = pcmk__find_client_by_id(cmd->client_id); if (client) { send_client_notify(client->id, client, notify); } } else { pcmk__foreach_ipc_client(send_client_notify, notify); } free_xml(notify); } static void send_generic_notify(int rc, xmlNode * request) { if (pcmk__ipc_client_count() != 0) { int call_id = 0; xmlNode *notify = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); const char *op = crm_element_value(request, F_LRMD_OPERATION); crm_element_value_int(request, F_LRMD_CALLID, &call_id); notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __func__); crm_xml_add_int(notify, F_LRMD_RC, rc); crm_xml_add_int(notify, F_LRMD_CALLID, call_id); crm_xml_add(notify, F_LRMD_OPERATION, op); crm_xml_add(notify, F_LRMD_RSC_ID, rsc_id); pcmk__foreach_ipc_client(send_client_notify, notify); free_xml(notify); } } static void cmd_reset(lrmd_cmd_t * cmd) { cmd->lrmd_op_status = 0; cmd->last_pid = 0; #ifdef PCMK__TIME_USE_CGT memset(&cmd->t_run, 0, sizeof(cmd->t_run)); memset(&cmd->t_queue, 0, sizeof(cmd->t_queue)); #endif cmd->epoch_last_run = 0; free(cmd->exit_reason); cmd->exit_reason = NULL; free(cmd->output); cmd->output = NULL; } static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc) { crm_trace("Resource operation rsc:%s action:%s completed (%p %p)", cmd->rsc_id, cmd->action, rsc ? 
rsc->active : NULL, cmd); if (rsc && (rsc->active == cmd)) { rsc->active = NULL; mainloop_set_trigger(rsc->work); } if (!rsc) { cmd->rsc_deleted = 1; } /* reset original timeout so client notification has correct information */ cmd->timeout = cmd->timeout_orig; send_cmd_complete_notify(cmd); if (cmd->interval_ms && (cmd->lrmd_op_status == PCMK_LRM_OP_CANCELLED)) { if (rsc) { rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else if (cmd->interval_ms == 0) { if (rsc) { rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else { /* Clear all the values pertaining just to the last iteration of a recurring op. */ cmd_reset(cmd); } } static int ocf2uniform_rc(int rc) { if (rc < 0 || rc > PCMK_OCF_FAILED_MASTER) { return PCMK_OCF_UNKNOWN_ERROR; } return rc; } static int stonith2uniform_rc(const char *action, int rc) { switch (rc) { case pcmk_ok: rc = PCMK_OCF_OK; break; case -ENODEV: /* This should be possible only for probes in practice, but * interpret for all actions to be safe. */ if (pcmk__str_eq(action, "monitor", pcmk__str_casei)) { rc = PCMK_OCF_NOT_RUNNING; } else if (pcmk__str_eq(action, "stop", pcmk__str_casei)) { rc = PCMK_OCF_OK; } else { rc = PCMK_OCF_NOT_INSTALLED; } break; case -EOPNOTSUPP: rc = PCMK_OCF_UNIMPLEMENT_FEATURE; break; case -ETIME: case -ETIMEDOUT: rc = PCMK_OCF_TIMEOUT; break; default: rc = PCMK_OCF_UNKNOWN_ERROR; break; } return rc; } #if SUPPORT_NAGIOS static int nagios2uniform_rc(const char *action, int rc) { if (rc < 0) { return PCMK_OCF_UNKNOWN_ERROR; } switch (rc) { case NAGIOS_STATE_OK: return PCMK_OCF_OK; case NAGIOS_INSUFFICIENT_PRIV: return PCMK_OCF_INSUFFICIENT_PRIV; case NAGIOS_NOT_INSTALLED: return PCMK_OCF_NOT_INSTALLED; case NAGIOS_STATE_WARNING: case NAGIOS_STATE_CRITICAL: case NAGIOS_STATE_UNKNOWN: case NAGIOS_STATE_DEPENDENT: default: return PCMK_OCF_UNKNOWN_ERROR; } return PCMK_OCF_UNKNOWN_ERROR; } #endif static int get_uniform_rc(const char *standard, const char *action, int rc) { if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) { return ocf2uniform_rc(rc); } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { return stonith2uniform_rc(action, rc); } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { return rc; } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_casei)) { return rc; #if SUPPORT_NAGIOS } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) { return nagios2uniform_rc(action, rc); #endif } else { return services_get_ocf_exitcode(action, rc); } } static int action_get_uniform_rc(svc_action_t * action) { lrmd_cmd_t *cmd = action->cb_data; return get_uniform_rc(action->standard, cmd->action, action->rc); } struct notify_new_client_data { xmlNode *notify; pcmk__client_t *new_client; }; static void notify_one_client(gpointer key, gpointer value, gpointer user_data) { pcmk__client_t *client = value; struct notify_new_client_data *data = user_data; if (!pcmk__str_eq(client->id, data->new_client->id, pcmk__str_casei)) { send_client_notify(key, (gpointer) client, (gpointer) data->notify); } } void notify_of_new_client(pcmk__client_t *new_client) { struct notify_new_client_data data; data.new_client = new_client; data.notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(data.notify, F_LRMD_ORIGIN, __func__); crm_xml_add(data.notify, F_LRMD_OPERATION, LRMD_OP_NEW_CLIENT); 
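    /* Broadcast to all connected IPC clients; notify_one_client() above skips
     * the new client itself, so a client is not told about its own arrival. */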
pcmk__foreach_ipc_client(notify_one_client, &data); free_xml(data.notify); } static char * parse_exit_reason(const char *output) { const char *cur = NULL; const char *last = NULL; static int cookie_len = 0; char *eol = NULL; size_t reason_len = EXIT_REASON_MAX_LEN; if (output == NULL) { return NULL; } if (!cookie_len) { cookie_len = strlen(PCMK_OCF_REASON_PREFIX); } cur = strstr(output, PCMK_OCF_REASON_PREFIX); for (; cur != NULL; cur = strstr(cur, PCMK_OCF_REASON_PREFIX)) { /* skip over the cookie delimiter string */ cur += cookie_len; last = cur; } if (last == NULL) { return NULL; } // Truncate everything after a new line, and limit reason string size eol = strchr(last, '\n'); if (eol) { reason_len = QB_MIN(reason_len, eol - last); } return strndup(last, reason_len); } void client_disconnect_cleanup(const char *client_id) { GHashTableIter iter; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (rsc->call_opts & lrmd_opt_drop_recurring) { /* This client is disconnecting, drop any recurring operations * it may have initiated on the resource */ cancel_all_recurring(rsc, client_id); } } } static void action_complete(svc_action_t * action) { lrmd_rsc_t *rsc; lrmd_cmd_t *cmd = action->cb_data; const char *rclass = NULL; #ifdef PCMK__TIME_USE_CGT bool goagain = false; #endif if (!cmd) { crm_err("Completed executor action (%s) does not match any known operations", action->id); return; } #ifdef PCMK__TIME_USE_CGT if (cmd->exec_rc != action->rc) { cmd->epoch_rcchange = time(NULL); } #endif cmd->last_pid = action->pid; cmd->exec_rc = action_get_uniform_rc(action); cmd->lrmd_op_status = action->status; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc && pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) { rclass = resources_find_service_class(rsc->type); } else if(rsc) { rclass = rsc->class; } #ifdef PCMK__TIME_USE_CGT if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { if ((cmd->exec_rc == PCMK_OCF_OK) && pcmk__strcase_any_of(cmd->action, "start", "stop", NULL)) { /* systemd returns from start and stop actions after the action * begins, not after it completes. We have to jump through a few * hoops so that we don't report 'complete' to the rest of pacemaker * until it's actually done. 
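             * Concretely: remember the original action in real_action,
             * substitute a "monitor" command, and let the goagain logic below
             * re-schedule that monitor until it yields a final result or the
             * original timeout is exhausted.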
*/ goagain = true; cmd->real_action = cmd->action; cmd->action = strdup("monitor"); } else if (cmd->real_action != NULL) { // This is follow-up monitor to check whether start/stop completed if ((cmd->lrmd_op_status == PCMK_LRM_OP_DONE) && (cmd->exec_rc == PCMK_OCF_PENDING)) { goagain = true; } else if ((cmd->exec_rc == PCMK_OCF_OK) && pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) { goagain = true; } else { int time_sum = time_diff_ms(NULL, &(cmd->t_first_run)); int timeout_left = cmd->timeout_orig - time_sum; crm_debug("%s systemd %s is now complete (elapsed=%dms, " "remaining=%dms): %s (%d)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc); cmd_original_times(cmd); // Monitors may return "not running", but start/stop shouldn't if ((cmd->lrmd_op_status == PCMK_LRM_OP_DONE) && (cmd->exec_rc == PCMK_OCF_NOT_RUNNING)) { if (pcmk__str_eq(cmd->real_action, "start", pcmk__str_casei)) { cmd->exec_rc = PCMK_OCF_UNKNOWN_ERROR; } else if (pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) { cmd->exec_rc = PCMK_OCF_OK; } } } } } #endif #if SUPPORT_NAGIOS if (rsc && pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) { if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei) && (cmd->interval_ms == 0) && cmd->exec_rc == PCMK_OCF_OK) { /* Successfully executed --version for the nagios plugin */ cmd->exec_rc = PCMK_OCF_NOT_RUNNING; } else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei) && cmd->exec_rc != PCMK_OCF_OK) { #ifdef PCMK__TIME_USE_CGT goagain = true; #endif } } #endif #ifdef PCMK__TIME_USE_CGT if (goagain) { int time_sum = time_diff_ms(NULL, &(cmd->t_first_run)); int timeout_left = cmd->timeout_orig - time_sum; int delay = cmd->timeout_orig / 10; if(delay >= timeout_left && timeout_left > 20) { delay = timeout_left/2; } delay = QB_MIN(2000, delay); if (delay < timeout_left) { cmd->start_delay = delay; cmd->timeout = timeout_left; if(cmd->exec_rc == PCMK_OCF_OK) { crm_debug("%s %s may still be in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, delay); } else if(cmd->exec_rc == PCMK_OCF_PENDING) { crm_info("%s %s is still in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, time_sum, timeout_left, delay); } else { crm_notice("%s %s failed '%s' (%d): re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc, time_sum, timeout_left, delay); } cmd_reset(cmd); if(rsc) { rsc->active = NULL; } schedule_lrmd_cmd(rsc, cmd); /* Don't finalize cmd, we're not done with it yet */ return; } else { crm_notice("Giving up on %s %s (rc=%d): timeout (elapsed=%dms, remaining=%dms)", cmd->rsc_id, cmd->real_action?cmd->real_action:cmd->action, cmd->exec_rc, time_sum, timeout_left); cmd->lrmd_op_status = PCMK_LRM_OP_TIMEOUT; cmd->exec_rc = PCMK_OCF_TIMEOUT; cmd_original_times(cmd); } } #endif if (action->stderr_data) { cmd->output = strdup(action->stderr_data); cmd->exit_reason = parse_exit_reason(action->stderr_data); } else if (action->stdout_data) { cmd->output = strdup(action->stdout_data); } cmd_finalize(cmd, rsc); } /*! 
* \internal * \brief Determine operation status of a stonith operation * * Non-stonith resource operations get their operation status directly from the * service library, but the fencer does not have an equivalent, so we must infer * an operation status from the fencer API's return code. * * \param[in] action Name of action performed on stonith resource * \param[in] interval_ms Action interval * \param[in] rc Action result from fencer * * \return Operation status corresponding to fencer API return code */ static int stonith_rc2status(const char *action, guint interval_ms, int rc) { int status = PCMK_LRM_OP_DONE; switch (rc) { case pcmk_ok: break; case -EOPNOTSUPP: case -EPROTONOSUPPORT: status = PCMK_LRM_OP_NOTSUPPORTED; break; case -ETIME: case -ETIMEDOUT: status = PCMK_LRM_OP_TIMEOUT; break; case -ENOTCONN: case -ECOMM: // Couldn't talk to fencer status = PCMK_LRM_OP_ERROR; break; case -ENODEV: // The device is not registered with the fencer status = PCMK_LRM_OP_ERROR; break; default: break; } return status; } static void stonith_action_complete(lrmd_cmd_t * cmd, int rc) { // This can be NULL if resource was removed before command completed lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); cmd->exec_rc = stonith2uniform_rc(cmd->action, rc); /* This function may be called with status already set to cancelled, if a * pending action was aborted. Otherwise, we need to determine status from * the fencer return code. */ if (cmd->lrmd_op_status != PCMK_LRM_OP_CANCELLED) { cmd->lrmd_op_status = stonith_rc2status(cmd->action, cmd->interval_ms, rc); // Certain successful actions change the known state of the resource if (rsc && (cmd->exec_rc == PCMK_OCF_OK)) { if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) { rsc->st_probe_rc = pcmk_ok; // maps to PCMK_OCF_OK } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { rsc->st_probe_rc = -ENODEV; // maps to PCMK_OCF_NOT_RUNNING } } } /* The recurring timer should not be running at this point in any case, but * as a failsafe, stop it if it is. */ stop_recurring_timer(cmd); /* Reschedule this command if appropriate. If a recurring command is *not* * rescheduled, its status must be PCMK_LRM_OP_CANCELLED, otherwise it will * not be removed from recurring_ops by cmd_finalize(). */ if (rsc && (cmd->interval_ms > 0) && (cmd->lrmd_op_status != PCMK_LRM_OP_CANCELLED)) { start_recurring_timer(cmd); } cmd_finalize(cmd, rsc); } static void lrmd_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { stonith_action_complete(data->userdata, data->rc); } void stonith_connection_failed(void) { GHashTableIter iter; GList *cmd_list = NULL; GList *cmd_iter = NULL; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { /* If we registered this fence device, we don't know whether the * fencer still has the registration or not. Cause future probes to * return PCMK_OCF_UNKNOWN_ERROR until the resource is stopped or * started successfully. This is especially important if the * controller also went away (possibly due to a cluster layer * restart) and won't receive our client notification of any * monitors finalized below. 
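             * (Each operation collected below is then finalized with
             * -ENOTCONN, so any waiting clients get a definite result rather
             * than hanging.)
             */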
*/ if (rsc->st_probe_rc == pcmk_ok) { rsc->st_probe_rc = pcmk_err_generic; } if (rsc->active) { cmd_list = g_list_append(cmd_list, rsc->active); } if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, rsc->recurring_ops); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, rsc->pending_ops); } rsc->pending_ops = rsc->recurring_ops = NULL; } } if (!cmd_list) { return; } crm_err("Connection to fencer failed, finalizing %d pending operations", g_list_length(cmd_list)); for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { stonith_action_complete(cmd_iter->data, -ENOTCONN); } g_list_free(cmd_list); } /*! * \internal * \brief Execute a stonith resource "start" action * * Start a stonith resource by registering it with the fencer. * (Stonith agents don't have a start command.) * * \param[in] stonith_api Connection to fencer * \param[in] rsc Stonith resource to start * \param[in] cmd Start command to execute * * \return pcmk_ok on success, -errno otherwise */ static int execd_stonith_start(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { char *key = NULL; char *value = NULL; stonith_key_value_t *device_params = NULL; int rc = pcmk_ok; // Convert command parameters to stonith API key/values if (cmd->params) { GHashTableIter iter; g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { device_params = stonith_key_value_add(device_params, key, value); } } /* The fencer will automatically register devices via CIB notifications * when the CIB changes, but to avoid a possible race condition between * the fencer receiving the notification and the executor requesting that * resource, the executor registers the device as well. The fencer knows how * to handle duplicate registrations. */ rc = stonith_api->cmds->register_device(stonith_api, st_opt_sync_call, cmd->rsc_id, rsc->provider, rsc->type, device_params); stonith_key_value_freeall(device_params, 1, 1); return rc; } /*! * \internal * \brief Execute a stonith resource "stop" action * * Stop a stonith resource by unregistering it with the fencer. * (Stonith agents don't have a stop command.) * * \param[in] stonith_api Connection to fencer * \param[in] rsc Stonith resource to stop * * \return pcmk_ok on success, -errno otherwise */ static inline int execd_stonith_stop(stonith_t *stonith_api, const lrmd_rsc_t *rsc) { /* @TODO Failure would indicate a problem communicating with fencer; * perhaps we should try reconnecting and retrying a few times? */ return stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call, rsc->rsc_id); } /*! 
* \internal * \brief Initiate a stonith resource agent recurring "monitor" action * * \param[in] stonith_api Connection to fencer * \param[in] rsc Stonith resource to monitor * \param[in] cmd Monitor command being executed * * \return pcmk_ok if monitor was successfully initiated, -errno otherwise */ static inline int execd_stonith_monitor(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { int rc = stonith_api->cmds->monitor(stonith_api, 0, cmd->rsc_id, cmd->timeout / 1000); rc = stonith_api->cmds->register_callback(stonith_api, rc, 0, 0, cmd, "lrmd_stonith_callback", lrmd_stonith_callback); if (rc == TRUE) { rsc->active = cmd; rc = pcmk_ok; } else { rc = -pcmk_err_generic; } return rc; } static void lrmd_rsc_execute_stonith(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { int rc = 0; bool do_monitor = FALSE; stonith_t *stonith_api = get_stonith_connection(); if (!stonith_api) { rc = -ENOTCONN; } else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) { rc = execd_stonith_start(stonith_api, rsc, cmd); if (rc == 0) { do_monitor = TRUE; } } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { rc = execd_stonith_stop(stonith_api, rsc); } else if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) { if (cmd->interval_ms > 0) { do_monitor = TRUE; } else { rc = rsc->st_probe_rc; } } if (do_monitor) { rc = execd_stonith_monitor(stonith_api, rsc, cmd); if (rc == pcmk_ok) { // Don't clean up yet, we will find out result of the monitor later return; } } stonith_action_complete(cmd, rc); } static int lrmd_rsc_execute_service_lib(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { svc_action_t *action = NULL; GHashTable *params_copy = NULL; CRM_ASSERT(rsc); CRM_ASSERT(cmd); crm_trace("Creating action, resource:%s action:%s class:%s provider:%s agent:%s", rsc->rsc_id, cmd->action, rsc->class, rsc->provider, rsc->type); #if SUPPORT_NAGIOS /* Recurring operations are cancelled anyway for a stop operation */ if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei) && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) { cmd->exec_rc = PCMK_OCF_OK; goto exec_done; } #endif params_copy = crm_str_table_dup(cmd->params); action = resources_action_create(rsc->rsc_id, rsc->class, rsc->provider, rsc->type, normalize_action_name(rsc, cmd->action), cmd->interval_ms, cmd->timeout, params_copy, cmd->service_flags); if (!action) { crm_err("Failed to create action, action:%s on resource %s", cmd->action, rsc->rsc_id); cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; goto exec_done; } action->cb_data = cmd; /* 'cmd' may not be valid after this point if * services_action_async() returned TRUE * * Upstart and systemd both synchronously determine monitor/status * results and call action_complete (which may free 'cmd') if necessary. 
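     * Consequently 'cmd' is dereferenced again only on the synchronous-failure
     * path below, which is reached when services_action_async() returns FALSE.
     */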
*/ if (services_action_async(action, action_complete)) { return TRUE; } cmd->exec_rc = action->rc; if(action->status != PCMK_LRM_OP_DONE) { cmd->lrmd_op_status = action->status; } else { cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; } services_action_free(action); action = NULL; exec_done: cmd_finalize(cmd, rsc); return TRUE; } static gboolean lrmd_rsc_execute(lrmd_rsc_t * rsc) { lrmd_cmd_t *cmd = NULL; CRM_CHECK(rsc != NULL, return FALSE); if (rsc->active) { crm_trace("%s is still active", rsc->rsc_id); return TRUE; } if (rsc->pending_ops) { GList *first = rsc->pending_ops; cmd = first->data; if (cmd->delay_id) { crm_trace ("Command %s %s was asked to run too early, waiting for start_delay timeout of %dms", cmd->rsc_id, cmd->action, cmd->start_delay); return TRUE; } rsc->pending_ops = g_list_remove_link(rsc->pending_ops, first); g_list_free_1(first); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_run), &(cmd->t_first_run)); #endif cmd->epoch_last_run = time(NULL); } if (!cmd) { crm_trace("Nothing further to do for %s", rsc->rsc_id); return TRUE; } rsc->active = cmd; /* only one op at a time for a rsc */ if (cmd->interval_ms) { rsc->recurring_ops = g_list_append(rsc->recurring_ops, cmd); } log_execute(cmd); if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { lrmd_rsc_execute_stonith(rsc, cmd); } else { lrmd_rsc_execute_service_lib(rsc, cmd); } return TRUE; } static gboolean lrmd_rsc_dispatch(gpointer user_data) { return lrmd_rsc_execute(user_data); } void free_rsc(gpointer data) { GListPtr gIter = NULL; lrmd_rsc_t *rsc = data; int is_stonith = pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei); gIter = rsc->pending_ops; while (gIter != NULL) { GListPtr next = gIter->next; lrmd_cmd_t *cmd = gIter->data; /* command was never executed */ cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; cmd_finalize(cmd, NULL); gIter = next; } /* frees list, but not list elements. */ g_list_free(rsc->pending_ops); gIter = rsc->recurring_ops; while (gIter != NULL) { GListPtr next = gIter->next; lrmd_cmd_t *cmd = gIter->data; if (is_stonith) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; /* If a stonith command is in-flight, just mark it as cancelled; * it is not safe to finalize/free the cmd until the stonith api * says it has either completed or timed out. */ if (rsc->active != cmd) { cmd_finalize(cmd, NULL); } } else { /* This command is already handed off to service library, * let service library cancel it and tell us via the callback * when it is cancelled. The rsc can be safely destroyed * even if we are waiting for the cancel result */ services_action_cancel(rsc->rsc_id, normalize_action_name(rsc, cmd->action), cmd->interval_ms); } gIter = next; } /* frees list, but not list elements. 
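     * Each remaining recurring cmd was either finalized above, left to an
     * in-flight stonith call, or will be freed by the service library's
     * cancellation callback.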
*/ g_list_free(rsc->recurring_ops); free(rsc->rsc_id); free(rsc->class); free(rsc->provider); free(rsc->type); mainloop_destroy_trigger(rsc->work); free(rsc); } static xmlNode * process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id) { xmlNode *reply = NULL; int rc = pcmk_ok; const char *is_ipc_provider = crm_element_value(request, F_LRMD_IS_IPC_PROVIDER); const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION); if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) { crm_err("Cluster API version must be greater than or equal to %s, not %s", LRMD_MIN_PROTOCOL_VERSION, protocol_version); rc = -EPROTO; } reply = create_lrmd_reply(__func__, rc, call_id); crm_xml_add(reply, F_LRMD_OPERATION, CRM_OP_REGISTER); crm_xml_add(reply, F_LRMD_CLIENTID, client->id); crm_xml_add(reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); if (crm_is_true(is_ipc_provider)) { // This is a remote connection from a cluster node's controller #ifdef SUPPORT_REMOTE ipc_proxy_add_provider(client); #endif } return reply; } static int process_lrmd_rsc_register(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = build_rsc_from_xml(request); lrmd_rsc_t *dup = g_hash_table_lookup(rsc_list, rsc->rsc_id); if (dup && pcmk__str_eq(rsc->class, dup->class, pcmk__str_casei) && pcmk__str_eq(rsc->provider, dup->provider, pcmk__str_casei) && pcmk__str_eq(rsc->type, dup->type, pcmk__str_casei)) { crm_notice("Ignoring duplicate registration of '%s'", rsc->rsc_id); free_rsc(rsc); return rc; } g_hash_table_replace(rsc_list, rsc->rsc_id, rsc); crm_info("Cached agent information for '%s'", rsc->rsc_id); return rc; } static xmlNode * process_lrmd_get_rsc_info(xmlNode *request, int call_id) { int rc = pcmk_ok; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); xmlNode *reply = NULL; lrmd_rsc_t *rsc = NULL; if (rsc_id == NULL) { rc = -ENODEV; } else { rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Agent information for '%s' not in cache", rsc_id); rc = -ENODEV; } } reply = create_lrmd_reply(__func__, rc, call_id); if (rsc) { crm_xml_add(reply, F_LRMD_RSC_ID, rsc->rsc_id); crm_xml_add(reply, F_LRMD_CLASS, rsc->class); crm_xml_add(reply, F_LRMD_PROVIDER, rsc->provider); crm_xml_add(reply, F_LRMD_TYPE, rsc->type); } return reply; } static int process_lrmd_rsc_unregister(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); if (!rsc_id) { return -ENODEV; } rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Ignoring unregistration of resource '%s', which is not registered", rsc_id); return pcmk_ok; } if (rsc->active) { /* let the caller know there are still active ops on this rsc to watch for */ crm_trace("Operation (0x%p) still in progress for unregistered resource %s", rsc->active, rsc_id); rc = -EINPROGRESS; } g_hash_table_remove(rsc_list, rsc_id); return rc; } static int process_lrmd_rsc_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) { lrmd_rsc_t *rsc = NULL; lrmd_cmd_t *cmd = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); int call_id; if (!rsc_id) { return -EINVAL; } if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) { 
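        /* Unknown resource: the controller must register a resource (see
         * process_lrmd_rsc_register() above) before executing actions on it. */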
crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); return -ENODEV; } cmd = create_lrmd_cmd(request, client); call_id = cmd->call_id; /* Don't reference cmd after handing it off to be scheduled. * The cmd could get merged and freed. */ schedule_lrmd_cmd(rsc, cmd); return call_id; } static int cancel_op(const char *rsc_id, const char *action, guint interval_ms) { GListPtr gIter = NULL; lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, rsc_id); /* How to cancel an action. * 1. Check pending ops list, if it hasn't been handed off * to the service library or stonith recurring list remove * it there and that will stop it. * 2. If it isn't in the pending ops list, then it's either a * recurring op in the stonith recurring list, or the service * library's recurring list. Stop it there * 3. If not found in any lists, then this operation has either * been executed already and is not a recurring operation, or * never existed. */ if (!rsc) { return -ENODEV; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (pcmk__str_eq(cmd->action, action, pcmk__str_casei) && (cmd->interval_ms == interval_ms)) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; cmd_finalize(cmd, rsc); return pcmk_ok; } } if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { /* The service library does not handle stonith operations. * We have to handle recurring stonith operations ourselves. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (pcmk__str_eq(cmd->action, action, pcmk__str_casei) && (cmd->interval_ms == interval_ms)) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; if (rsc->active != cmd) { cmd_finalize(cmd, rsc); } return pcmk_ok; } } } else if (services_action_cancel(rsc_id, normalize_action_name(rsc, action), interval_ms) == TRUE) { /* The service library will tell the action_complete callback function * this action was cancelled, which will destroy the cmd and remove * it from the recurring_op list. Do not do that in this function * if the service library says it cancelled it. */ return pcmk_ok; } return -EOPNOTSUPP; } static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id) { GList *cmd_list = NULL; GList *cmd_iter = NULL; /* Notice a copy of each list is created when concat is called. * This prevents odd behavior from occurring when the cmd_list * is iterated through later on. It is possible the cancel_op * function may end up modifying the recurring_ops and pending_ops * lists. 
If we did not copy those lists, our cmd_list iteration * could get messed up.*/ if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->recurring_ops)); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->pending_ops)); } if (!cmd_list) { return; } for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { lrmd_cmd_t *cmd = cmd_iter->data; if (cmd->interval_ms == 0) { continue; } if (client_id && !pcmk__str_eq(cmd->client_id, client_id, pcmk__str_casei)) { continue; } cancel_op(rsc->rsc_id, cmd->action, cmd->interval_ms); } /* frees only the copied list data, not the cmds */ g_list_free(cmd_list); } static int process_lrmd_rsc_cancel(pcmk__client_t *client, uint32_t id, xmlNode *request) { xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); const char *action = crm_element_value(rsc_xml, F_LRMD_RSC_ACTION); guint interval_ms = 0; crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &interval_ms); if (!rsc_id || !action) { return -EINVAL; } return cancel_op(rsc_id, action, interval_ms); } static void add_recurring_op_xml(xmlNode *reply, lrmd_rsc_t *rsc) { xmlNode *rsc_xml = create_xml_node(reply, F_LRMD_RSC); crm_xml_add(rsc_xml, F_LRMD_RSC_ID, rsc->rsc_id); for (GList *item = rsc->recurring_ops; item != NULL; item = item->next) { lrmd_cmd_t *cmd = item->data; xmlNode *op_xml = create_xml_node(rsc_xml, T_LRMD_RSC_OP); crm_xml_add(op_xml, F_LRMD_RSC_ACTION, (cmd->real_action? cmd->real_action : cmd->action)); crm_xml_add_ms(op_xml, F_LRMD_RSC_INTERVAL, cmd->interval_ms); crm_xml_add_int(op_xml, F_LRMD_TIMEOUT, cmd->timeout_orig); } } static xmlNode * process_lrmd_get_recurring(xmlNode *request, int call_id) { int rc = pcmk_ok; const char *rsc_id = NULL; lrmd_rsc_t *rsc = NULL; xmlNode *reply = NULL; xmlNode *rsc_xml = NULL; // Resource ID is optional rsc_xml = first_named_child(request, F_LRMD_CALLDATA); if (rsc_xml) { rsc_xml = first_named_child(rsc_xml, F_LRMD_RSC); } if (rsc_xml) { rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); } // If resource ID is specified, resource must exist if (rsc_id != NULL) { rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); rc = -ENODEV; } } reply = create_lrmd_reply(__func__, rc, call_id); // If resource ID is not specified, check all resources if (rsc_id == NULL) { GHashTableIter iter; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rsc)) { add_recurring_op_xml(reply, rsc); } } else if (rsc) { add_recurring_op_xml(reply, rsc); } return reply; } void process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; int call_id = 0; const char *op = crm_element_value(request, F_LRMD_OPERATION); int do_reply = 0; int do_notify = 0; xmlNode *reply = NULL; crm_trace("Processing %s operation from %s", op, client->id); crm_element_value_int(request, F_LRMD_CALLID, &call_id); if (pcmk__str_eq(op, CRM_OP_IPC_FWD, pcmk__str_none)) { #ifdef SUPPORT_REMOTE ipc_proxy_forward_client(client, request); #endif do_reply = 1; } else if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) { reply = process_lrmd_signon(client, request, call_id); do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_REG, pcmk__str_none)) { rc = process_lrmd_rsc_register(client, id, request); do_notify = 1; do_reply = 1; } else if 
(pcmk__str_eq(op, LRMD_OP_RSC_INFO, pcmk__str_none)) { reply = process_lrmd_get_rsc_info(request, call_id); do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_UNREG, pcmk__str_none)) { rc = process_lrmd_rsc_unregister(client, id, request); /* don't notify anyone about failed un-registers */ if (rc == pcmk_ok || rc == -EINPROGRESS) { do_notify = 1; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_EXEC, pcmk__str_none)) { rc = process_lrmd_rsc_exec(client, id, request); do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_CANCEL, pcmk__str_none)) { rc = process_lrmd_rsc_cancel(client, id, request); do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_POKE, pcmk__str_none)) { do_notify = 1; do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_CHECK, pcmk__str_none)) { xmlNode *data = get_message_xml(request, F_LRMD_CALLDATA); const char *timeout = crm_element_value(data, F_LRMD_WATCHDOG); CRM_LOG_ASSERT(data != NULL); pcmk__valid_sbd_timeout(timeout); } else if (pcmk__str_eq(op, LRMD_OP_ALERT_EXEC, pcmk__str_none)) { rc = process_lrmd_alert_exec(client, id, request); do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_GET_RECURRING, pcmk__str_none)) { reply = process_lrmd_get_recurring(request, call_id); do_reply = 1; } else { rc = -EOPNOTSUPP; do_reply = 1; crm_err("Unknown IPC request '%s' from %s", op, client->name); } crm_debug("Processed %s operation from %s: rc=%d, reply=%d, notify=%d", op, client->id, rc, do_reply, do_notify); if (do_reply) { int send_rc = pcmk_rc_ok; if (reply == NULL) { reply = create_lrmd_reply(__func__, rc, call_id); } send_rc = lrmd_server_send_reply(client, id, reply); free_xml(reply); if (send_rc != pcmk_rc_ok) { crm_warn("Reply to client %s failed: %s " CRM_XS " %d", client->name, pcmk_rc_str(send_rc), send_rc); } } if (do_notify) { send_generic_notify(rc, request); } } diff --git a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst b/doc/sphinx/Pacemaker_Explained/advanced-resources.rst index c2d952d098..4e6a5c6d47 100644 --- a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst +++ b/doc/sphinx/Pacemaker_Explained/advanced-resources.rst @@ -1,1642 +1,1643 @@ Advanced Resource Types ----------------------- .. index: single: group resource single: resource; group .. _group-resources: Groups - A Syntactic Shortcut ############################# One of the most common elements of a cluster is a set of resources that need to be located together, start sequentially, and stop in the reverse order. To simplify this configuration, we support the concept of groups. .. topic:: A group of two primitive resources .. code-block:: xml Although the example above contains only two resources, there is no limit to the number of resources a group can contain. The example is also sufficient to explain the fundamental properties of a group: * Resources are started in the order they appear in (**Public-IP** first, then **Email**) * Resources are stopped in the reverse order to which they appear in (**Email** first, then **Public-IP**) If a resource in the group can't run anywhere, then nothing after that is allowed to run, too. * If **Public-IP** can't run anywhere, neither can **Email**; * but if **Email** can't run anywhere, this does not affect **Public-IP** in any way The group above is logically equivalent to writing: .. topic:: How the cluster sees a group resource .. code-block:: xml Obviously as the group grows bigger, the reduced configuration effort can become significant. 
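For illustration, the two-resource group described above could be written as
follows; the ``IPaddr`` and ``exim`` agents are assumptions chosen only to
make the sketch concrete.

.. topic:: A sketch of such a group

   .. code-block:: xml

      <group id="shortcut">
         <primitive id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
            <instance_attributes id="params-public-ip">
               <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
            </instance_attributes>
         </primitive>
         <primitive id="Email" class="lsb" type="exim"/>
      </group>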
Another (typical) example of a group is a DRBD volume, the filesystem mount, an IP address, and an application that uses them. .. index:: pair: XML element; group Group Properties ________________ .. table:: **Properties of a Group Resource** +-------+--------------------------------------+ | Field | Description | +=======+======================================+ | id | .. index:: | | | single: group; property, id | | | single: property; id (group) | | | single: id; group property | | | | | | A unique name for the group | +-------+--------------------------------------+ Group Options _____________ Groups inherit the ``priority``, ``target-role``, and ``is-managed`` properties from primitive resources. See :ref:`resource_options` for information about those properties. Group Instance Attributes _________________________ Groups have no instance attributes. However, any that are set for the group object will be inherited by the group's children. Group Contents ______________ Groups may only contain a collection of cluster resources (see :ref:`primitive-resource`). To refer to a child of a group resource, just use the child's ``id`` instead of the group's. Group Constraints _________________ Although it is possible to reference a group's children in constraints, it is usually preferable to reference the group itself. .. topic:: Some constraints involving groups .. code-block:: xml .. index:: pair: resource-stickiness; group Group Stickiness ________________ Stickiness, the measure of how much a resource wants to stay where it is, is additive in groups. Every active resource of the group will contribute its stickiness value to the group's total. So if the default ``resource-stickiness`` is 100, and a group has seven members, five of which are active, then the group as a whole will prefer its current location with a score of 500. .. index:: single: clone resource single: resource; clone .. _s-resource-clone: Clones - Resources That Can Have Multiple Active Instances ########################################################## *Clone* resources are resources that can have more than one copy active at the same time. This allows you, for example, to run a copy of a daemon on every node. You can clone any primitive or group resource [#]_. Anonymous versus Unique Clones ______________________________ A clone resource is configured to be either *anonymous* or *globally unique*. Anonymous clones are the simplest. These behave completely identically everywhere they are running. Because of this, there can be only one instance of an anonymous clone active per node. The instances of globally unique clones are distinct entities. All instances are launched identically, but one instance of the clone is not identical to any other instance, whether running on the same node or a different node. As an example, a cloned IP address can use special kernel functionality such that each instance handles a subset of requests for the same IP address. .. index:: single: Promotable Clone Resources single: resource; promotable .. _s-resource-promotable: Promotable clones _________________ If a clone is *promotable*, its instances can perform a special role that Pacemaker will manage via the ``promote`` and ``demote`` actions of the resource agent. Services that support such a special role have various terms for the special role and the default role: primary and secondary, master and replica, controller and worker, etc. 
Pacemaker uses the terms *master* and *slave* [#]_, but is agnostic to what the service calls them or what they do. All that Pacemaker cares about is that an instance comes up in the default role when started, and the resource agent supports the ``promote`` and ``demote`` actions to manage entering and exiting the special role. .. index:: pair: XML element; clone Clone Properties ________________ .. table:: **Properties of a Clone Resource** +-------+--------------------------------------+ | Field | Description | +=======+======================================+ | id | .. index:: | | | single: clone; property, id | | | single: property; id (clone) | | | single: id; clone property | | | | | | A unique name for the clone | +-------+--------------------------------------+ .. index:: pair: options; clone Clone Options _____________ :ref:`Options ` inherited from primitive resources: ``priority, target-role, is-managed`` .. table:: **Clone-specific configuration options** +-------------------+-----------------+-------------------------------------------------------+ | Field | Default | Description | +===================+=================+=======================================================+ | globally-unique | false | .. index:: | | | | single: clone; option, globally-unique | | | | single: option; globally-unique (clone) | | | | single: globally-unique; clone option | | | | | | | | If **true**, each clone instance performs a | | | | distinct function | +-------------------+-----------------+-------------------------------------------------------+ | clone-max | number of nodes | .. index:: | | | in the cluster | single: clone; option, clone-max | | | | single: option; clone-max (clone) | | | | single: clone-max; clone option | | | | | | | | The maximum number of clone instances that can | | | | be started across the entire cluster | +-------------------+-----------------+-------------------------------------------------------+ | clone-node-max | 1 | .. index:: | | | | single: clone; option, clone-node-max | | | | single: option; clone-node-max (clone) | | | | single: clone-node-max; clone option | | | | | | | | If ``globally-unique`` is **true**, the maximum | | | | number of clone instances that can be started | | | | on a single node | +-------------------+-----------------+-------------------------------------------------------+ | clone-min | 0 | .. index:: | | | | single: clone; option, clone-min | | | | single: option; clone-min (clone) | | | | single: clone-min; clone option | | | | | | | | Require at least this number of clone instances | | | | to be runnable before allowing resources | | | | depending on the clone to be runnable. A value | | | | of 0 means require all clone instances to be | | | | runnable. | +-------------------+-----------------+-------------------------------------------------------+ | notify | false | .. index:: | | | | single: clone; option, notify | | | | single: option; notify (clone) | | | | single: notify; clone option | | | | | | | | Call the resource agent's **notify** action for | | | | all active instances, before and after starting | | | | or stopping any clone instance. The resource | | | | agent must support this action. | | | | Allowed values: **false**, **true** | +-------------------+-----------------+-------------------------------------------------------+ | ordered | false | .. 
index:: | | | | single: clone; option, ordered | | | | single: option; ordered (clone) | | | | single: ordered; clone option | | | | | | | | If **true**, clone instances must be started | | | | sequentially instead of in parallel. | | | | Allowed values: **false**, **true** | +-------------------+-----------------+-------------------------------------------------------+ | interleave | false | .. index:: | | | | single: clone; option, interleave | | | | single: option; interleave (clone) | | | | single: interleave; clone option | | | | | | | | When this clone is ordered relative to another | | | | clone, if this option is **false** (the default), | | | | the ordering is relative to *all* instances of | | | | the other clone, whereas if this option is | | | | **true**, the ordering is relative only to | | | | instances on the same node. | | | | Allowed values: **false**, **true** | +-------------------+-----------------+-------------------------------------------------------+ | promotable | false | .. index:: | | | | single: clone; option, promotable | | | | single: option; promotable (clone) | | | | single: promotable; clone option | | | | | | | | If **true**, clone instances can perform a | | | | special role that Pacemaker will manage via the | | | | resource agent's **promote** and **demote** | | | | actions. The resource agent must support these | | | | actions. | | | | Allowed values: **false**, **true** | +-------------------+-----------------+-------------------------------------------------------+ | promoted-max | 1 | .. index:: | | | | single: clone; option, promoted-max | | | | single: option; promoted-max (clone) | | | | single: promoted-max; clone option | | | | | | | | If ``promotable`` is **true**, the number of | | | | instances that can be promoted at one time | | | | across the entire cluster | +-------------------+-----------------+-------------------------------------------------------+ | promoted-node-max | 1 | .. index:: | | | | single: clone; option, promoted-node-max | | | | single: option; promoted-node-max (clone) | | | | single: promoted-node-max; clone option | | | | | | | | If ``promotable`` is **true** and ``globally-unique`` | | | | is **false**, the number of clone instances can be | | | | promoted at one time on a single node | +-------------------+-----------------+-------------------------------------------------------+ For backward compatibility, ``master-max`` and ``master-node-max`` are accepted as aliases for ``promoted-max`` and ``promoted-node-max``, but are deprecated since 2.0.0, and support for them will be removed in a future version. Clone Contents ______________ Clones must contain exactly one primitive or group resource. .. topic:: A clone that runs a web server on all nodes .. code-block:: xml .. warning:: You should never reference the name of a clone's child (the primitive or group resource being cloned). If you think you need to do this, you probably need to re-evaluate your design. Clone Instance Attribute ________________________ Clones have no instance attributes; however, any that are set here will be inherited by the clone's child. Clone Constraints _________________ In most cases, a clone will have a single instance on each active cluster node. If this is not the case, you can indicate which nodes the cluster should preferentially assign copies to with resource location constraints. These constraints are written no differently from those for primitive resources except that the clone's **id** is used. .. 
topic:: Some constraints involving clones .. code-block:: xml Ordering constraints behave slightly differently for clones. In the example above, ``apache-stats`` will wait until all copies of ``apache-clone`` that need to be started have done so before being started itself. Only if *no* copies can be started will ``apache-stats`` be prevented from being active. Additionally, the clone will wait for ``apache-stats`` to be stopped before stopping itself. Colocation of a primitive or group resource with a clone means that the resource can run on any node with an active instance of the clone. The cluster will choose an instance based on where the clone is running and the resource's own location preferences. Colocation between clones is also possible. If one clone **A** is colocated with another clone **B**, the set of allowed locations for **A** is limited to nodes on which **B** is (or will be) active. Placement is then performed normally. Promotable Clone Constraints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For promotable clone resources, the ``first-action`` and/or ``then-action`` fields for ordering constraints may be set to ``promote`` or ``demote`` to constrain the master role, and colocation constraints may contain ``rsc-role`` and/or ``with-rsc-role`` fields. .. index:: single: constraint; colocation .. table:: **Additional colocation constraint options for promotable clone resources** +---------------+---------+-------------------------------------------------------+ | Field | Default | Description | +===============+=========+=======================================================+ | rsc-role | Started | .. index:: | | | | single: clone; ordering constraint, rsc-role | | | | single: ordering constraint; rsc-role (clone) | | | | single: rsc-role; clone ordering constraint | | | | | | | | An additional attribute of colocation constraints | | | | that specifies the role that ``rsc`` must be in. | | | | Allowed values: **Started**, **Master**, **Slave**. | +---------------+---------+-------------------------------------------------------+ | with-rsc-role | Started | .. index:: | | | | single: clone; ordering constraint, with-rsc-role | | | | single: ordering constraint; with-rsc-role (clone) | | | | single: with-rsc-role; clone ordering constraint | | | | | | | | An additional attribute of colocation constraints | | | | that specifies the role that ``with-rsc`` must be in. | | | | Allowed values: **Started**, **Master**, **Slave**. | +---------------+---------+-------------------------------------------------------+ .. topic:: Constraints involving promotable clone resources .. code-block:: xml In the example above, **myApp** will wait until one of the database copies has been started and promoted to master before being started itself on the same node. Only if no copies can be promoted will **myApp** be prevented from being active. Additionally, the cluster will wait for **myApp** to be stopped before demoting the database. Colocation of a primitive or group resource with a promotable clone resource means that it can run on any node with an active instance of the promotable clone resource that has the specified role (**master** or **slave**). In the example above, the cluster will choose a location based on where database is running as a **master**, and if there are multiple **master** instances it will also factor in **myApp**'s own location preferences when deciding which location to choose. Colocation with regular clones and other promotable clone resources is also possible. 
In such cases, the set of allowed locations for the **rsc** clone is (after role filtering) limited to nodes on which the ``with-rsc`` promotable clone resource is (or will be) in the specified role. Placement is then performed as normal. Using Promotable Clone Resources in Colocation Sets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. index:: single: constraint; colocation single: constraint; resource set .. table:: **Additional colocation set options relevant to promotable clone resources** +-------+---------+-----------------------------------------------------+ | Field | Default | Description | +=======+=========+=====================================================+ | role | Started | .. index:: | | | | single: clone; colocation constraint; role | | | | single: colocation constraint; role (clone) | | | | single: role; clone colocation constraint | | | | | | | | The role that *all members* of the set must be in. | | | | Allowed values: **Started**, **Master**, **Slave**. | +-------+---------+-----------------------------------------------------+ In the following example, **B**'s master must be located on the same node as **A**'s master. Additionally, resources **C** and **D** must be located on the same node as **A**'s and **B**'s masters. .. topic:: Colocate C and D with A's and B's master instances .. code-block:: xml Using Promotable Clone Resources in Ordered Sets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. index:: single: constraint; ordering single: constraint; resource set .. table:: **Additional ordered set options relevant to promotable clone resources** +--------+------------------+-----------------------------------------------------+ | Field | Default | Description | +========+==================+=====================================================+ | action | value of | .. index:: | | | ``first-action`` | single: clone; ordering constraint; action | | | | single: ordering constraint; action (clone) | | | | single: action; clone ordering constraint | | | | | | | | An additional attribute of ordering constraint | | | | sets that specifies the action that applies to | | | | *all members* of the set. | | | | Allowed values: **start**, **stop**, **promote**, | | | | **demote**. | +--------+------------------+-----------------------------------------------------+ .. topic:: Start C and D after first promoting A and B .. code-block:: xml In the above example, **B** cannot be promoted to a master role until **A** has been promoted. Additionally, resources **C** and **D** must wait until **A** and **B** have been promoted before they can start. .. index:: pair: resource-stickiness; clone .. _s-clone-stickiness: Clone Stickiness ________________ To achieve a stable allocation pattern, clones are slightly sticky by default. If no value for ``resource-stickiness`` is provided, the clone will use a value of 1. Being a small value, it causes minimal disturbance to the score calculations of other resources but is enough to prevent Pacemaker from needlessly moving copies around the cluster. .. note:: For globally unique clones, this may result in multiple instances of the clone staying on a single node, even after another eligible node becomes active (for example, after being put into standby mode then made active again). If you do not want this behavior, specify a ``resource-stickiness`` of 0 for the clone temporarily and let the cluster adjust, then set it back to 1 if you want the default behavior to apply again.
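The note above suggests temporarily setting the clone's ``resource-stickiness`` to 0. A minimal sketch of how that meta-attribute can be expressed follows; the resource names and agent choice are illustrative:

.. topic:: Temporarily disabling stickiness for a clone (illustrative sketch)

   .. code-block:: xml

      <clone id="apache-clone">
         <meta_attributes id="apache-clone-meta">
            <nvpair id="apache-clone-stickiness"
                    name="resource-stickiness" value="0"/>
         </meta_attributes>
         <primitive id="apache" class="ocf" provider="heartbeat" type="apache"/>
      </clone>

..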
important:: If ``resource-stickiness`` is set in the ``rsc_defaults`` section, it will apply to clone instances as well. This means an explicit ``resource-stickiness`` of 0 in ``rsc_defaults`` works differently from the implicit default used when ``resource-stickiness`` is not specified. Clone Resource Agent Requirements _________________________________ Any resource can be used as an anonymous clone, as it requires no additional support from the resource agent. Whether it makes sense to do so depends on your resource and its resource agent. Resource Agent Requirements for Globally Unique Clones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Globally unique clones require additional support in the resource agent. In particular, it must only respond with ``${OCF_SUCCESS}`` if the node has that exact instance active. All other probes for instances of the clone should result in ``${OCF_NOT_RUNNING}`` (or one of the other OCF error codes if they are failed). Individual instances of a clone are identified by appending a colon and a numerical offset, e.g. **apache:2**. Resource agents can find out how many copies there are by examining the ``OCF_RESKEY_CRM_meta_clone_max`` environment variable and which instance it is by examining ``OCF_RESKEY_CRM_meta_clone``. The resource agent must not make any assumptions (based on ``OCF_RESKEY_CRM_meta_clone``) about which numerical instances are active. In particular, the list of active copies will not always be an unbroken sequence, nor always start at 0. Resource Agent Requirements for Promotable Clones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Promotable clone resources require two extra actions, ``demote`` and ``promote``, which are responsible for changing the state of the resource. Like **start** and **stop**, they should return ``${OCF_SUCCESS}`` if they completed successfully or a relevant error code if they did not. The states can mean whatever you wish, but when the resource is started, it must come up in the mode called **slave**. From there the cluster will decide which instances to promote to **master**. In addition to the clone requirements for monitor actions, agents must also *accurately* report which state they are in. The cluster relies on the agent to report its status (including role) accurately and does not indicate to the agent what role it currently believes it to be in. .. table:: **Role implications of OCF return codes** +---------------------+------------------------------------------------+ | Monitor Return Code | Description | +=====================+================================================+ | OCF_NOT_RUNNING | .. index:: | | | single: OCF_NOT_RUNNING | | | single: OCF return code; OCF_NOT_RUNNING | | | | | | Stopped | +---------------------+------------------------------------------------+ | OCF_SUCCESS | .. index:: | | | single: OCF_SUCCESS | | | single: OCF return code; OCF_SUCCESS | | | | | | Running (Slave) | +---------------------+------------------------------------------------+ | OCF_RUNNING_MASTER | .. index:: | | | single: OCF_RUNNING_MASTER | | | single: OCF return code; OCF_RUNNING_MASTER | | | | | | Running (Master) | +---------------------+------------------------------------------------+ | OCF_FAILED_MASTER | .. index:: | | | single: OCF_FAILED_MASTER | | | single: OCF return code; OCF_FAILED_MASTER | | | | | | Failed (Master) | +---------------------+------------------------------------------------+ | Other | .. 
index:: | | | single: return code | | | | | | Failed (Slave) | +---------------------+------------------------------------------------+ Clone Notifications ~~~~~~~~~~~~~~~~~~~ If the clone has the ``notify`` meta-attribute set to **true**, and the resource agent supports the ``notify`` action, Pacemaker will call the action when appropriate, passing a number of extra variables which, when combined with additional context, can be used to calculate the current state of the cluster and what is about to happen to it. .. index:: single: clone; environment variables single: notify; environment variables .. table:: **Environment variables supplied with Clone notify actions** +----------------------------------------------+-------------------------------------------------------------------------------+ | Variable | Description | +==============================================+===============================================================================+ | OCF_RESKEY_CRM_meta_notify_type | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_type | | | single: OCF_RESKEY_CRM_meta_notify_type | | | | | | Allowed values: **pre**, **post** | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_operation | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_operation | | | single: OCF_RESKEY_CRM_meta_notify_operation | | | | | | Allowed values: **start**, **stop** | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_start_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_start_resource | | | single: OCF_RESKEY_CRM_meta_notify_start_resource | | | | | | Resources to be started | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_stop_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_resource | | | single: OCF_RESKEY_CRM_meta_notify_stop_resource | | | | | | Resources to be stopped | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_active_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_active_resource | | | single: OCF_RESKEY_CRM_meta_notify_active_resource | | | | | | Resources that are running | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_inactive_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_inactive_resource | | | single: OCF_RESKEY_CRM_meta_notify_inactive_resource | | | | | | Resources that are not running | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_start_uname | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_start_uname | | | single: OCF_RESKEY_CRM_meta_notify_start_uname | | | | | | Nodes on which resources will be started | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_stop_uname | .. 
index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_uname | | | single: OCF_RESKEY_CRM_meta_notify_stop_uname | | | | | | Nodes on which resources will be stopped | +----------------------------------------------+-------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_active_uname | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_active_uname | | | single: OCF_RESKEY_CRM_meta_notify_active_uname | | | | | | Nodes on which resources are running | +----------------------------------------------+-------------------------------------------------------------------------------+ The variables come in pairs, such as ``OCF_RESKEY_CRM_meta_notify_start_resource`` and ``OCF_RESKEY_CRM_meta_notify_start_uname``, and should be treated as an array of whitespace-separated elements. ``OCF_RESKEY_CRM_meta_notify_inactive_resource`` is an exception, as the matching **uname** variable does not exist since inactive resources are not running on any node. Thus, in order to indicate that **clone:0** will be started on **sles-1**, **clone:2** will be started on **sles-3**, and **clone:3** will be started on **sles-2**, the cluster would set: .. topic:: Notification variables .. code-block:: none OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3" OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2" .. note:: Pacemaker will log but otherwise ignore failures of notify actions. Interpretation of Notification Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Pre-notification (stop):** * Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` **Post-notification (stop) / Pre-notification (start):** * Active resources * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Inactive resources * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` **Post-notification (start):** * Active resources: * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Inactive resources: * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` Extra Notifications for Promotable Clones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. index:: single: clone; environment variables single: promotable; environment variables .. table:: **Extra environment variables supplied for promotable clones** +---------------------------------------------+------------------------------------------------------------------------------+ | Variable | Description | +=============================================+==============================================================================+ | OCF_RESKEY_CRM_meta_notify_master_resource | .. 
index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_master_resource | | | single: OCF_RESKEY_CRM_meta_notify_master_resource | | | | | | Resources that are running in **Master** mode | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_slave_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_slave_resource | | | single: OCF_RESKEY_CRM_meta_notify_slave_resource | | | | | | Resources that are running in **Slave** mode | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_promote_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_resource | | | single: OCF_RESKEY_CRM_meta_notify_promote_resource | | | | | | Resources to be promoted | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_demote_resource | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_resource | | | single: OCF_RESKEY_CRM_meta_notify_demote_resource | | | | | | Resources to be demoted | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_promote_uname | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_uname | | | single: OCF_RESKEY_CRM_meta_notify_promote_uname | | | | | | Nodes on which resources will be promoted | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_demote_uname | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_uname | | | single: OCF_RESKEY_CRM_meta_notify_demote_uname | | | | | | Nodes on which resources will be demoted | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_master_uname | .. index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_master_uname | | | single: OCF_RESKEY_CRM_meta_notify_master_uname | | | | | | Nodes on which resources are running in **Master** mode | +---------------------------------------------+------------------------------------------------------------------------------+ | OCF_RESKEY_CRM_meta_notify_slave_uname | .. 
index:: | | | single: environment variable; OCF_RESKEY_CRM_meta_notify_slave_uname | | | single: OCF_RESKEY_CRM_meta_notify_slave_uname | | | | | | Nodes on which resources are running in **Slave** mode | +---------------------------------------------+------------------------------------------------------------------------------+ Interpretation of Promotable Notification Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Pre-notification (demote):** * **Active** resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * **Master** resources: ``$OCF_RESKEY_CRM_meta_notify_master_resource`` * **Slave** resources: ``$OCF_RESKEY_CRM_meta_notify_slave_resource`` * Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` **Post-notification (demote) / Pre-notification (stop):** * **Active** resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * **Master** resources: * ``$OCF_RESKEY_CRM_meta_notify_master_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * **Slave** resources: ``$OCF_RESKEY_CRM_meta_notify_slave_resource`` * Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` **Post-notification (stop) / Pre-notification (start)** * **Active** resources: * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * **Master** resources: * ``$OCF_RESKEY_CRM_meta_notify_master_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * **Slave** resources: * ``$OCF_RESKEY_CRM_meta_notify_slave_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Inactive resources: * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` **Post-notification (start) / Pre-notification (promote)** * **Active** resources: * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * **Master** resources: * ``$OCF_RESKEY_CRM_meta_notify_master_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * **Slave** resources: * ``$OCF_RESKEY_CRM_meta_notify_slave_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Inactive resources: * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * minus 
``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` **Post-notification (promote)** * **Active** resources: * ``$OCF_RESKEY_CRM_meta_notify_active_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * **Master** resources: * ``$OCF_RESKEY_CRM_meta_notify_master_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * **Slave** resources: * ``$OCF_RESKEY_CRM_meta_notify_slave_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Inactive resources: * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource`` * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource`` * Resources that were promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` Monitoring Promotable Clone Resources _____________________________________ The usual monitor actions are insufficient to monitor a promotable clone resource, because Pacemaker needs to verify not only that the resource is active, but also that its actual role matches its intended one. Define two monitoring actions: the usual one will cover the slave role, and an additional one with ``role="master"`` will cover the master role. .. topic:: Monitoring both states of a promotable clone resource .. code-block:: xml .. important:: It is crucial that *every* monitor operation has a different interval! Pacemaker currently differentiates between operations only by resource and interval; so if (for example) a promotable clone resource had the same monitor interval for both roles, Pacemaker would ignore the role when checking the status -- which would cause unexpected return codes, and therefore unnecessary complications. .. _s-promotion-scores: Determining Which Instance is Promoted ______________________________________ Pacemaker can choose a promotable clone instance to be promoted in one of two ways: * Promotion scores: These are node attributes set via the ``crm_master`` utility, which generally would be called by the resource agent's start action if it supports promotable clones. This tool automatically detects both the resource and host, and should be used to set a preference for being promoted. Based on this, ``promoted-max``, and ``promoted-node-max``, the instance(s) with the highest preference will be promoted. 
* Constraints: Location constraints can indicate which nodes are most preferred as masters. .. topic:: Explicitly preferring node1 to be promoted to master .. code-block:: xml .. index: single: bundle resource single: resource; bundle pair: container; Docker pair: container; podman pair: container; rkt .. _s-resource-bundle: Bundles - Isolated Environments ############################### Pacemaker supports a special syntax for launching a `container `_ with any infrastructure it requires: the *bundle*. Pacemaker bundles support `Docker `_, -`podman `_, and `rkt `_ -container technologies. [#]_ +`podman `_ *(since 2.0.1)*, and +`rkt `_ container technologies. [#]_ .. topic:: A bundle for a containerized web server .. code-block:: xml .. index: single: bundle resource single: resource; bundle Bundle Prerequisites ____________________ Before configuring a bundle in Pacemaker, the user must install the appropriate container launch technology (Docker, podman, or rkt), and supply a fully configured container image, on every node allowed to run the bundle. Pacemaker will create an implicit resource of type **ocf:heartbeat:docker**, **ocf:heartbeat:podman**, or **ocf:heartbeat:rkt** to manage a bundle's container. The user must ensure that the appropriate resource agent is installed on every node allowed to run the bundle. .. index:: pair: XML element; bundle Bundle Properties _________________ .. table:: **XML Attributes of a bundle Element** +-------------+-----------------------------------------------+ | Attribute | Description | +=============+===============================================+ | id | .. index:: | | | single: bundle; attribute, id | | | single: attribute; id (bundle) | | | single: id; bundle attribute | | | | | | A unique name for the bundle (required) | +-------------+-----------------------------------------------+ | description | .. index:: | | | single: bundle; attribute, description | | | single: attribute; description (bundle) | | | single: description; bundle attribute | | | | | | Arbitrary text (not used by Pacemaker) | +-------------+-----------------------------------------------+ A bundle must contain exactly one ``docker``, ``podman``, or ``rkt`` element. .. index:: pair: XML element; docker pair: XML element; podman pair: XML element; rkt single: resource; bundle Bundle Container Properties ___________________________ .. table:: **XML attributes of a docker, podman, or rkt Element** +-------------------+------------------------------------+---------------------------------------------------+ | Attribute | Default | Description | +===================+====================================+===================================================+ | image | | .. index:: | | | | single: docker; attribute, image | | | | single: attribute; image (docker) | | | | single: image; docker attribute | | | | single: podman; attribute, image | | | | single: attribute; image (podman) | | | | single: image; podman attribute | | | | single: rkt; attribute, image | | | | single: attribute; image (rkt) | | | | single: image; rkt attribute | | | | | | | | Container image tag (required) | +-------------------+------------------------------------+---------------------------------------------------+ | replicas | Value of ``promoted-max`` | .. 
index:: | | | if that is positive, else 1 | single: docker; attribute, replicas | | | | single: attribute; replicas (docker) | | | | single: replicas; docker attribute | | | | single: podman; attribute, replicas | | | | single: attribute; replicas (podman) | | | | single: replicas; podman attribute | | | | single: rkt; attribute, replicas | | | | single: attribute; replicas (rkt) | | | | single: replicas; rkt attribute | | | | | | | | A positive integer specifying the number of | | | | container instances to launch | +-------------------+------------------------------------+---------------------------------------------------+ | replicas-per-host | 1 | .. index:: | | | | single: docker; attribute, replicas-per-host | | | | single: attribute; replicas-per-host (docker) | | | | single: replicas-per-host; docker attribute | | | | single: podman; attribute, replicas-per-host | | | | single: attribute; replicas-per-host (podman) | | | | single: replicas-per-host; podman attribute | | | | single: rkt; attribute, replicas-per-host | | | | single: attribute; replicas-per-host (rkt) | | | | single: replicas-per-host; rkt attribute | | | | | | | | A positive integer specifying the number of | | | | container instances allowed to run on a | | | | single node | +-------------------+------------------------------------+---------------------------------------------------+ | promoted-max | 0 | .. index:: | | | | single: docker; attribute, promoted-max | | | | single: attribute; promoted-max (docker) | | | | single: promoted-max; docker attribute | | | | single: podman; attribute, promoted-max | | | | single: attribute; promoted-max (podman) | | | | single: promoted-max; podman attribute | | | | single: rkt; attribute, promoted-max | | | | single: attribute; promoted-max (rkt) | | | | single: promoted-max; rkt attribute | | | | | | | | A non-negative integer that, if positive, | | | | indicates that the containerized service | | | | should be treated as a promotable service, | | | | with this many replicas allowed to run the | | | | service in the master role | +-------------------+------------------------------------+---------------------------------------------------+ | network | | .. index:: | | | | single: docker; attribute, network | | | | single: attribute; network (docker) | | | | single: network; docker attribute | | | | single: podman; attribute, network | | | | single: attribute; network (podman) | | | | single: network; podman attribute | | | | single: rkt; attribute, network | | | | single: attribute; network (rkt) | | | | single: network; rkt attribute | | | | | | | | If specified, this will be passed to the | | | | ``docker run``, ``podman run``, or | | | | ``rkt run`` command as the network setting | | | | for the container. | +-------------------+------------------------------------+---------------------------------------------------+ | run-command | ``/usr/sbin/pacemaker-remoted`` if | .. index:: | | | bundle contains a **primitive**, | single: docker; attribute, run-command | | | otherwise none | single: attribute; run-command (docker) | | | | single: run-command; docker attribute | | | | single: podman; attribute, run-command | | | | single: attribute; run-command (podman) | | | | single: run-command; podman attribute | | | | single: rkt; attribute, run-command | | | | single: attribute; run-command (rkt) | | | | single: run-command; rkt attribute | | | | | | | | This command will be run inside the container | | | | when launching it ("PID 1"). 
If the bundle | | | | contains a **primitive**, this command *must* | | | | start ``pacemaker-remoted`` (but could, for | | | | example, be a script that does other stuff, too). | +-------------------+------------------------------------+---------------------------------------------------+ | options | | .. index:: | | | | single: docker; attribute, options | | | | single: attribute; options (docker) | | | | single: options; docker attribute | | | | single: podman; attribute, options | | | | single: attribute; options (podman) | | | | single: options; podman attribute | | | | single: rkt; attribute, options | | | | single: attribute; options (rkt) | | | | single: options; rkt attribute | | | | | | | | Extra command-line options to pass to the | | | | ``docker run``, ``podman run``, or ``rkt run`` | | | | command | +-------------------+------------------------------------+---------------------------------------------------+ .. note:: Considerations when using cluster configurations or container images from Pacemaker 1.1: * If the container image has a pre-2.0.0 version of Pacemaker, set ``run-command`` to ``/usr/sbin/pacemaker_remoted`` (note the underbar instead of dash). * ``masters`` is accepted as an alias for ``promoted-max``, but is deprecated since 2.0.0, and support for it will be removed in a future version. Bundle Network Properties _________________________ A bundle may optionally contain one ```` element. .. index:: pair: XML element; network single: resource; bundle single: bundle; networking .. topic:: **XML attributes of a network Element** +----------------+---------+------------------------------------------------------------+ | Attribute | Default | Description | +================+=========+============================================================+ | add-host | TRUE | .. index:: | | | | single: network; attribute, add-host | | | | single: attribute; add-host (network) | | | | single: add-host; network attribute | | | | | | | | If TRUE, and ``ip-range-start`` is used, Pacemaker will | | | | automatically ensure that ``/etc/hosts`` inside the | | | | containers has entries for each | | | | :ref:`replica name ` | | | | and its assigned IP. | +----------------+---------+------------------------------------------------------------+ | ip-range-start | | .. index:: | | | | single: network; attribute, ip-range-start | | | | single: attribute; ip-range-start (network) | | | | single: ip-range-start; network attribute | | | | | | | | If specified, Pacemaker will create an implicit | | | | ``ocf:heartbeat:IPaddr2`` resource for each container | | | | instance, starting with this IP address, using up to | | | | ``replicas`` sequential addresses. These addresses can be | | | | used from the host's network to reach the service inside | | | | the container, though it is not visible within the | | | | container itself. Only IPv4 addresses are currently | | | | supported. | +----------------+---------+------------------------------------------------------------+ | host-netmask | 32 | .. index:: | | | | single: network; attribute; host-netmask | | | | single: attribute; host-netmask (network) | | | | single: host-netmask; network attribute | | | | | | | | If ``ip-range-start`` is specified, the IP addresses | | | | are created with this CIDR netmask (as a number of bits). | +----------------+---------+------------------------------------------------------------+ | host-interface | | .. 
index:: | | | | single: network; attribute; host-interface | | | | single: attribute; host-interface (network) | | | | single: host-interface; network attribute | | | | | | | | If ``ip-range-start`` is specified, the IP addresses are | | | | created on this host interface (by default, it will be | | | | determined from the IP address). | +----------------+---------+------------------------------------------------------------+ | control-port | 3121 | .. index:: | | | | single: network; attribute; control-port | | | | single: attribute; control-port (network) | | | | single: control-port; network attribute | | | | | | | | If the bundle contains a ``primitive``, the cluster will | | | | use this integer TCP port for communication with | | | | Pacemaker Remote inside the container. Changing this is | | | | useful when the container is unable to listen on the | | | | default port, for example, when the container uses the | | | | host's network rather than ``ip-range-start`` (in which | | | | case ``replicas-per-host`` must be 1), or when the bundle | | | | may run on a Pacemaker Remote node that is already | | | | listening on the default port. Any ``PCMK_remote_port`` | | | | environment variable set on the host or in the container | | | | is ignored for bundle connections. | +----------------+---------+------------------------------------------------------------+ .. _s-resource-bundle-note-replica-names: .. note:: Replicas are named by the bundle id plus a dash and an integer counter starting with zero. For example, if a bundle named **httpd-bundle** has **replicas=2**, its containers will be named **httpd-bundle-0** and **httpd-bundle-1**. .. index:: pair: XML element; port-mapping Additionally, a ``network`` element may optionally contain one or more ``port-mapping`` elements. .. table:: **Attributes of a port-mapping Element** +---------------+-------------------+------------------------------------------------------+ | Attribute | Default | Description | +===============+===================+======================================================+ | id | | .. index:: | | | | single: port-mapping; attribute, id | | | | single: attribute; id (port-mapping) | | | | single: id; port-mapping attribute | | | | | | | | A unique name for the port mapping (required) | +---------------+-------------------+------------------------------------------------------+ | port | | .. index:: | | | | single: port-mapping; attribute, port | | | | single: attribute; port (port-mapping) | | | | single: port; port-mapping attribute | | | | | | | | If this is specified, connections to this TCP port | | | | number on the host network (on the container's | | | | assigned IP address, if ``ip-range-start`` is | | | | specified) will be forwarded to the container | | | | network. Exactly one of ``port`` or ``range`` | | | | must be specified in a ``port-mapping``. | +---------------+-------------------+------------------------------------------------------+ | internal-port | value of ``port`` | .. index:: | | | | single: port-mapping; attribute, internal-port | | | | single: attribute; internal-port (port-mapping) | | | | single: internal-port; port-mapping attribute | | | | | | | | If ``port`` and this are specified, connections | | | | to ``port`` on the host's network will be | | | | forwarded to this port on the container network. | +---------------+-------------------+------------------------------------------------------+ | range | | .. 
index:: | | | | single: port-mapping; attribute, range | | | | single: attribute; range (port-mapping) | | | | single: range; port-mapping attribute | | | | | | | | If this is specified, connections to these TCP | | | | port numbers (expressed as *first_port*-*last_port*) | | | | on the host network (on the container's assigned IP | | | | address, if ``ip-range-start`` is specified) will | | | | be forwarded to the same ports in the container | | | | network. Exactly one of ``port`` or ``range`` | | | | must be specified in a ``port-mapping``. | +---------------+-------------------+------------------------------------------------------+ .. note:: If the bundle contains a ``primitive``, Pacemaker will automatically map the ``control-port``, so it is not necessary to specify that port in a ``port-mapping``. .. index: pair: XML element; storage pair: XML element; storage-mapping single: resource; bundle .. _s-bundle-storage: Bundle Storage Properties _________________________ A bundle may optionally contain one ``storage`` element. A ``storage`` element has no properties of its own, but may contain one or more ``storage-mapping`` elements. .. table:: **Attributes of a storage-mapping Element** +-----------------+---------+-------------------------------------------------------------+ | Attribute | Default | Description | +=================+=========+=============================================================+ | id | | .. index:: | | | | single: storage-mapping; attribute, id | | | | single: attribute; id (storage-mapping) | | | | single: id; storage-mapping attribute | | | | | | | | A unique name for the storage mapping (required) | +-----------------+---------+-------------------------------------------------------------+ | source-dir | | .. index:: | | | | single: storage-mapping; attribute, source-dir | | | | single: attribute; source-dir (storage-mapping) | | | | single: source-dir; storage-mapping attribute | | | | | | | | The absolute path on the host's filesystem that will be | | | | mapped into the container. Exactly one of ``source-dir`` | | | | and ``source-dir-root`` must be specified in a | | | | ``storage-mapping``. | +-----------------+---------+-------------------------------------------------------------+ | source-dir-root | | .. index:: | | | | single: storage-mapping; attribute, source-dir-root | | | | single: attribute; source-dir-root (storage-mapping) | | | | single: source-dir-root; storage-mapping attribute | | | | | | | | The start of a path on the host's filesystem that will | | | | be mapped into the container, using a different | | | | subdirectory on the host for each container instance. | | | | The subdirectory will be named the same as the | | | | :ref:`replica name `. | | | | Exactly one of ``source-dir`` and ``source-dir-root`` | | | | must be specified in a ``storage-mapping``. | +-----------------+---------+-------------------------------------------------------------+ | target-dir | | .. index:: | | | | single: storage-mapping; attribute, target-dir | | | | single: attribute; target-dir (storage-mapping) | | | | single: target-dir; storage-mapping attribute | | | | | | | | The path name within the container where the host | | | | storage will be mapped (required) | +-----------------+---------+-------------------------------------------------------------+ | options | | .. 
index:: | | | | single: storage-mapping; attribute, options | | | | single: attribute; options (storage-mapping) | | | | single: options; storage-mapping attribute | | | | | | | | A comma-separated list of file system mount | | | | options to use when mapping the storage | +-----------------+---------+-------------------------------------------------------------+ .. note:: Pacemaker does not define the behavior if the source directory does not already exist on the host. However, it is expected that the container technology and/or its resource agent will create the source directory in that case. .. note:: If the bundle contains a ``primitive``, Pacemaker will automatically map the equivalent of ``source-dir=/etc/pacemaker/authkey target-dir=/etc/pacemaker/authkey`` and ``source-dir-root=/var/log/pacemaker/bundles target-dir=/var/log`` into the container, so it is not necessary to specify those paths in a ``storage-mapping``. .. important:: The ``PCMK_authkey_location`` environment variable must not be set to anything other than the default of ``/etc/pacemaker/authkey`` on any node in the cluster. .. important:: If SELinux is used in enforcing mode on the host, you must ensure the container is allowed to use any storage you mount into it. For Docker and podman bundles, adding "Z" to the mount options will create a container-specific label for the mount that allows the container access. .. index:: single: resource; bundle Bundle Primitive ________________ A bundle may optionally contain one :ref:`primitive ` resource. The primitive may have operations, instance attributes, and meta-attributes defined, as usual. If a bundle contains a primitive resource, the container image must include the Pacemaker Remote daemon, and at least one of ``ip-range-start`` or ``control-port`` must be configured in the bundle. Pacemaker will create an implicit **ocf:pacemaker:remote** resource for the connection, launch Pacemaker Remote within the container, and monitor and manage the primitive resource via Pacemaker Remote. If the bundle has more than one container instance (replica), the primitive resource will function as an implicit :ref:`clone ` -- a :ref:`promotable clone ` if the bundle has ``promoted-max`` greater than zero. .. note:: If you want to pass environment variables to a bundle's Pacemaker Remote connection or primitive, you have two options: * Environment variables whose value is the same regardless of the underlying host may be set using the container element's ``options`` attribute. * If you want variables to have host-specific values, you can use the :ref:`storage-mapping ` element to map a file on the host as - ``/etc/pacemaker/pcmk-init.env`` in the container. Pacemaker Remote will parse - this file as a shell-like format, with variables set as NAME=VALUE, ignoring - blank lines and comments starting with "#". + ``/etc/pacemaker/pcmk-init.env`` in the container *(since 2.0.3)*. + Pacemaker Remote will parse this file as a shell-like format, with + variables set as NAME=VALUE, ignoring blank lines and comments starting + with "#". .. important:: When a bundle has a ``primitive``, Pacemaker on all cluster nodes must be able to contact Pacemaker Remote inside the bundle's containers. * The containers must have an accessible network (for example, ``network`` should not be set to "none" with a ``primitive``). * The default, using a distinct network space inside the container, works in combination with ``ip-range-start``. 
Any firewall must allow access from all cluster nodes to the ``control-port`` on the container IPs. * If the container shares the host's network space (for example, by setting ``network`` to "host"), a unique ``control-port`` should be specified for each bundle. Any firewall must allow access from all cluster nodes to the ``control-port`` on all cluster and remote node IPs. .. index:: single: resource; bundle .. _s-bundle-attributes: Bundle Node Attributes ______________________ If the bundle has a ``primitive``, the primitive's resource agent may want to set node attributes such as :ref:`promotion scores `. However, with containers, it is not apparent which node should get the attribute. If the container uses shared storage that is the same no matter which node the container is hosted on, then it is appropriate to use the promotion score on the bundle node itself. On the other hand, if the container uses storage exported from the underlying host, then it may be more appropriate to use the promotion score on the underlying host. Since this depends on the particular situation, the ``container-attribute-target`` resource meta-attribute allows the user to specify which approach to use. If it is set to ``host``, then user-defined node attributes will be checked on the underlying host. If it is anything else, the local node (in this case the bundle node) is used as usual. This only applies to user-defined attributes; the cluster will always check the local node for cluster-defined attributes such as ``#uname``. If ``container-attribute-target`` is ``host``, the cluster will pass additional environment variables to the primitive's resource agent that allow it to set node attributes appropriately: ``CRM_meta_container_attribute_target`` (identical to the meta-attribute value) and ``CRM_meta_physical_host`` (the name of the underlying host). .. note:: When called by a resource agent, the ``attrd_updater`` and ``crm_attribute`` commands will automatically check those environment variables and set attributes appropriately. .. index:: single: resource; bundle Bundle Meta-Attributes ______________________ Any meta-attribute set on a bundle will be inherited by the bundle's primitive and any resources implicitly created by Pacemaker for the bundle. This includes options such as ``priority``, ``target-role``, and ``is-managed``. See :ref:`resource_options` for more information. Limitations of Bundles ______________________ Restarting pacemaker while a bundle is unmanaged or the cluster is in maintenance mode may cause the bundle to fail. Bundles may not be explicitly cloned or included in groups. This includes the bundle's primitive and any resources implicitly created by Pacemaker for the bundle. (If ``replicas`` is greater than 1, the bundle will behave like a clone implicitly.) Bundles do not have instance attributes, utilization attributes, or operations, though a bundle's primitive may have them. A bundle with a primitive can run on a Pacemaker Remote node only if the bundle uses a distinct ``control-port``. .. [#] Of course, the service must support running multiple instances. .. [#] These are historical terms that will eventually be replaced, but the extensive use of them and the need for backward compatibility makes it a long process. You may see examples using a **master** tag instead of a **clone** tag with the **promotable** meta-attribute set to **true**; the **master** tag is supported, but deprecated, and will be removed in a future version. 
You may also see such services referred to as *multi-state* or *stateful*; these mean the same thing as *promotable*.

.. [#] Docker is a trademark of Docker, Inc. No endorsement by or association with Docker, Inc. is implied.

diff --git a/doc/sphinx/Pacemaker_Explained/alerts.rst b/doc/sphinx/Pacemaker_Explained/alerts.rst
index 32afcc6c59..7db6a0d6df 100644
--- a/doc/sphinx/Pacemaker_Explained/alerts.rst
+++ b/doc/sphinx/Pacemaker_Explained/alerts.rst
@@ -1,496 +1,497 @@
.. index:: single: alert single: resource; alert single: node; alert single: fencing; alert pair: XML element; alert pair: XML element; alerts

Alerts
------

*Alerts* may be configured to take some external action when a cluster event occurs (node failure, resource starting or stopping, etc.).

.. index:: pair: alert; agent

Alert Agents
############

As with resource agents, the cluster calls an external program (an *alert agent*) to handle alerts. The cluster passes information about the event to the agent via environment variables. Agents can do anything desired with this information (send an e-mail, log to a file, update a monitoring system, etc.).

.. topic:: Simple alert configuration

   .. code-block:: xml

      <configuration>
         <alerts>
            <alert id="my-alert" path="/path/to/my-script.sh" />
         </alerts>
      </configuration>

In the example above, the cluster will call ``my-script.sh`` for each event. Multiple alert agents may be configured; the cluster will call all of them for each event.

Alert agents will be called only on cluster nodes. They will be called for events involving Pacemaker Remote nodes, but they will never be called *on* those nodes.

.. index:: single: alert; recipient pair: XML element; recipient

Alert Recipients
################

Usually, alerts are directed towards a recipient. Thus, each alert may be additionally configured with one or more recipients. The cluster will call the agent separately for each recipient.

.. topic:: Alert configuration with recipient

   .. code-block:: xml

      <configuration>
         <alerts>
            <alert id="my-alert" path="/path/to/my-script.sh">
               <recipient id="my-alert-recipient" value="some-address"/>
            </alert>
         </alerts>
      </configuration>

In the above example, the cluster will call ``my-script.sh`` for each event, passing the recipient ``some-address`` as an environment variable. The recipient may be anything the alert agent can recognize -- an IP address, an e-mail address, a file name, whatever the particular agent supports.

.. index:: single: alert; meta-attributes single: meta-attribute; alert meta-attributes

Alert Meta-Attributes
#####################

As with resource agents, meta-attributes can be configured for alert agents to affect how Pacemaker calls them.

.. table:: **Meta-Attributes of an Alert**

   +------------------+---------------+-----------------------------------------------------+
   | Meta-Attribute   | Default       | Description                                         |
   +==================+===============+=====================================================+
   | timestamp-format | %H:%M:%S.%06N | .. index::                                          |
   |                  |               |    single: alert; meta-attribute, timestamp-format  |
   |                  |               |    single: meta-attribute; timestamp-format (alert) |
   |                  |               |    single: timestamp-format; alert meta-attribute   |
   |                  |               |                                                     |
   |                  |               | Format the cluster will use when sending the        |
   |                  |               | event's timestamp to the agent. This is a string as |
   |                  |               | used with the ``date(1)`` command.                  |
   +------------------+---------------+-----------------------------------------------------+
   | timeout          | 30s           | .. index::                                          |
   |                  |               |    single: alert; meta-attribute, timeout           |
   |                  |               |    single: meta-attribute; timeout (alert)          |
   |                  |               |    single: timeout; alert meta-attribute            |
   |                  |               |                                                     |
   |                  |               | If the alert agent does not complete within this    |
   |                  |               | amount of time, it will be terminated.              |
   +------------------+---------------+-----------------------------------------------------+

Meta-attributes can be configured per alert agent and/or per recipient.

.. topic:: Alert configuration with meta-attributes

   .. code-block:: xml

      <configuration>
         <alerts>
            <alert id="my-alert" path="/path/to/my-script.sh">
               <meta_attributes id="my-alert-attributes">
                  <nvpair id="my-alert-attributes-timeout" name="timeout" value="15s"/>
               </meta_attributes>
               <recipient id="my-alert-recipient1" value="someuser@example.com">
                  <meta_attributes id="my-alert-recipient1-attributes">
                     <nvpair id="my-alert-recipient1-timestamp-format" name="timestamp-format" value="%D %H:%M"/>
                  </meta_attributes>
               </recipient>
               <recipient id="my-alert-recipient2" value="otheruser@example.com">
                  <meta_attributes id="my-alert-recipient2-attributes">
                     <nvpair id="my-alert-recipient2-timestamp-format" name="timestamp-format" value="%c"/>
                  </meta_attributes>
               </recipient>
            </alert>
         </alerts>
      </configuration>

In the above example, ``my-script.sh`` will be called twice for each event, with each call using a 15-second timeout. One call will be passed the recipient ``someuser@example.com`` and a timestamp in the format ``%D %H:%M``, while the other call will be passed the recipient ``otheruser@example.com`` and a timestamp in the format ``%c``.

.. index:: single: alert; instance attributes single: instance attribute; alert instance attributes

Alert Instance Attributes
#########################

As with resource agents, agent-specific configuration values may be configured as instance attributes. These will be passed to the agent as additional environment variables. The number, names and allowed values of these instance attributes are completely up to the particular agent.

.. topic:: Alert configuration with instance attributes

   .. code-block:: xml

.. index:: single: alert; filters pair: XML element; select pair: XML element; select_nodes pair: XML element; select_fencing pair: XML element; select_resources pair: XML element; select_attributes pair: XML element; attribute

Alert Filters
#############

By default, an alert agent will be called for node events, fencing events, and resource events. An agent may choose to ignore certain types of events, but there is still the overhead of calling it for those events. To eliminate that overhead, you may select which types of events the agent should receive.

.. topic:: Alert configuration to receive only node events and fencing events

   .. code-block:: xml

      <configuration>
         <alerts>
            <alert id="my-alert" path="/path/to/my-script.sh">
               <select>
                  <select_nodes />
                  <select_fencing />
               </select>
               <recipient id="my-alert-recipient" value="some-address"/>
            </alert>
         </alerts>
      </configuration>

The possible options within ``select`` are ``select_nodes``, ``select_fencing``, ``select_resources``, and ``select_attributes``.

.. note:: Node attribute alerts are currently considered experimental. Alerts may be limited to attributes set via ``attrd_updater``, and agents may be called multiple times with the same attribute value.

.. index:: single: alert; sample agents

Using the Sample Alert Agents
#############################

Pacemaker provides several sample alert agents, installed in ``/usr/share/pacemaker/alerts`` by default. While these sample scripts may be copied and used as-is, they are provided mainly as templates to be edited to suit your purposes. See their source code for the full set of instance attributes they support.

.. topic:: Sending cluster events as SNMP traps

   .. code-block:: xml

.. topic:: Sending cluster events as e-mails

   .. code-block:: xml

Writing an Alert Agent
######################

.. index:: single: alert; environment variables single: environment variable; alert agents

.. table:: **Environment variables passed to alert agents**

+---------------------------+----------------------------------------------------------------+ | Environment Variable | Description | +===========================+================================================================+ | CRM_alert_kind | .. index:: | | | single:environment variable; CRM_alert_kind | | | single:CRM_alert_kind | | | | | | The type of alert (``node``, ``fencing``, ``resource``, or | | | ``attribute``) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_node | .. index:: | | | single:environment variable; CRM_alert_node | | | single:CRM_alert_node | | | | | | Name of affected node | +---------------------------+----------------------------------------------------------------+ | CRM_alert_node_sequence | ..
index:: | | | single:environment variable; CRM_alert_sequence | | | single:CRM_alert_sequence | | | | | | A sequence number increased whenever an alert is being issued | | | on the local node, which can be used to reference the order in | | | which alerts have been issued by Pacemaker. An alert for an | | | event that happened later in time reliably has a higher | | | sequence number than alerts for earlier events. | | | | | | Be aware that this number has no cluster-wide meaning. | +---------------------------+----------------------------------------------------------------+ | CRM_alert_recipient | .. index:: | | | single:environment variable; CRM_alert_recipient | | | single:CRM_alert_recipient | | | | | | The configured recipient | +---------------------------+----------------------------------------------------------------+ | CRM_alert_timestamp | .. index:: | | | single:environment variable; CRM_alert_timestamp | | | single:CRM_alert_timestamp | | | | | | A timestamp created prior to executing the agent, in the | | | format specified by the ``timestamp-format`` meta-attribute. | | | This allows the agent to have a reliable, high-precision time | | | of when the event occurred, regardless of when the agent | | | itself was invoked (which could potentially be delayed due to | | | system load, etc.). | +---------------------------+----------------------------------------------------------------+ | CRM_alert_timestamp_epoch | .. index:: | | | single:environment variable; CRM_alert_timestamp_epoch | | | single:CRM_alert_timestamp_epoch | | | | | | The same time as ``CRM_alert_timestamp``, expressed as the | | | integer number of seconds since January 1, 1970. This (along | | | with ``CRM_alert_timestamp_usec``) can be useful for alert | | | agents that need to format time in a specific way rather than | | | let the user configure it. | +---------------------------+----------------------------------------------------------------+ | CRM_alert_timestamp_usec | .. index:: | | | single:environment variable; CRM_alert_timestamp_usec | | | single:CRM_alert_timestamp_usec | | | | | | The same time as ``CRM_alert_timestamp``, expressed as the | | | integer number of microseconds since | | | ``CRM_alert_timestamp_epoch``. | +---------------------------+----------------------------------------------------------------+ | CRM_alert_version | .. index:: | | | single:environment variable; CRM_alert_version | | | single:CRM_alert_version | | | | | | The version of Pacemaker sending the alert | +---------------------------+----------------------------------------------------------------+ | CRM_alert_desc | .. index:: | | | single:environment variable; CRM_alert_desc | | | single:CRM_alert_desc | | | | | | Detail about event. For ``node`` alerts, this is the node's | | | current state (``member`` or ``lost``). For ``fencing`` | | | alerts, this is a summary of the requested fencing operation, | | | including origin, target, and fencing operation error code, if | | | any. For ``resource`` alerts, this is a readable string | | | equivalent of ``CRM_alert_status``. | +---------------------------+----------------------------------------------------------------+ | CRM_alert_nodeid | .. index:: | | | single:environment variable; CRM_alert_nodeid | | | single:CRM_alert_nodeid | | | | | | ID of node whose status changed (provided with ``node`` alerts | | | only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_rc | .. 
index:: | | | single:environment variable; CRM_alert_rc | | | single:CRM_alert_rc | | | | | | The numerical return code of the fencing or resource operation | | | (provided with ``fencing`` and ``resource`` alerts only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_task | .. index:: | | | single:environment variable; CRM_alert_task | | | single:CRM_alert_task | | | | | | The requested fencing or resource operation (provided with | | | ``fencing`` and ``resource`` alerts only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_exec_time | .. index:: | | | single:environment variable; CRM_alert_exec_time | | | single:CRM_alert_exec_time | | | | | | The (wall-clock) time, in milliseconds, that it took to | | | execute the action. If the action timed out, | | | ``CRM_alert_status`` will be 2, ``CRM_alert_desc`` will be | | | "Timed Out", and this value will be the action timeout. May | | | not be supported on all platforms. (``resource`` alerts only) | + | | *(since 2.0.1)* | +---------------------------+----------------------------------------------------------------+ | CRM_alert_interval | .. index:: | | | single:environment variable; CRM_alert_interval | | | single:CRM_alert_interval | | | | | | The interval of the resource operation (``resource`` alerts | | | only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_rsc | .. index:: | | | single:environment variable; CRM_alert_rsc | | | single:CRM_alert_rsc | | | | | | The name of the affected resource (``resource`` alerts only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_status | .. index:: | | | single:environment variable; CRM_alert_status | | | single:CRM_alert_status | | | | | | A numerical code used by Pacemaker to represent the operation | | | result (``resource`` alerts only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_target_rc | .. index:: | | | single:environment variable; CRM_alert_target_rc | | | single:CRM_alert_target_rc | | | | | | The expected numerical return code of the operation | | | (``resource`` alerts only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_attribute_name | .. index:: | | | single:environment variable; CRM_alert_attribute_name | | | single:CRM_alert_attribute_name | | | | | | The name of the node attribute that changed (``attribute`` | | | alerts only) | +---------------------------+----------------------------------------------------------------+ | CRM_alert_attribute_value | .. index:: | | | single:environment variable; CRM_alert_attribute_value | | | single:CRM_alert_attribute_value | | | | | | The new value of the node attribute that changed | | | (``attribute`` alerts only) | +---------------------------+----------------------------------------------------------------+ Special concerns when writing alert agents: * Alert agents may be called with no recipient (if none is configured), so the agent must be able to handle this situation, even if it only exits in that case. (Users may modify the configuration in stages, and add a recipient later.) * If more than one recipient is configured for an alert, the alert agent will be called once per recipient. 
If an agent is not able to run concurrently, it should be configured with only a single recipient. The agent is free, however, to interpret the recipient as a list.

* When a cluster event occurs, all alerts are fired off at the same time as separate processes. Depending on how many alerts and recipients are configured, and on what is done within the alert agents, a significant load burst may occur. The agent could be written to take this into consideration, for example by queueing resource-intensive actions into some other instance, instead of directly executing them.

* Alert agents are run as the ``hacluster`` user, which has a minimal set of permissions. If an agent requires additional privileges, it is recommended to configure ``sudo`` to allow the agent to run the necessary commands as another user with the appropriate privileges.

* As always, take care to validate and sanitize user-configured parameters, such as ``CRM_alert_timestamp`` (whose content is specified by the user-configured ``timestamp-format``), ``CRM_alert_recipient``, and all instance attributes. Mostly this is needed simply to protect against configuration errors, but if some user can modify the CIB without having ``hacluster``-level access to the cluster nodes, it is a potential security concern as well, since unsanitized input could allow code injection.

.. note:: **ocf:pacemaker:ClusterMon compatibility**

   The alerts interface is designed to be backward compatible with the external scripts interface used by the ``ocf:pacemaker:ClusterMon`` resource, which is now deprecated. To preserve this compatibility, the environment variables passed to alert agents are available prepended with ``CRM_notify_`` as well as ``CRM_alert_``. One break in compatibility is that ``ClusterMon`` ran external scripts as the ``root`` user, while alert agents are run as the ``hacluster`` user.

diff --git a/doc/sphinx/Pacemaker_Explained/options.rst b/doc/sphinx/Pacemaker_Explained/options.rst
index e78172a356..7deca616d5 100644
--- a/doc/sphinx/Pacemaker_Explained/options.rst
+++ b/doc/sphinx/Pacemaker_Explained/options.rst
@@ -1,612 +1,612 @@
Cluster-Wide Configuration
--------------------------

.. index:: pair: XML element; cib pair: XML element; configuration

Configuration Layout
####################

The cluster is defined by the Cluster Information Base (CIB), which uses XML notation. The simplest CIB, an empty one, looks like this:

.. topic:: An empty configuration

   .. code-block:: xml

      <cib crm_feature_set="3.6.0" validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
        <configuration>
          <crm_config/>
          <nodes/>
          <resources/>
          <constraints/>
        </configuration>
        <status/>
      </cib>

The empty configuration above contains the major sections that make up a CIB:

* ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain fundamental settings are defined as attributes of this element.

* ``configuration``: This section -- the primary focus of this document -- contains traditional configuration information such as what resources the cluster serves and the relationships among them.

  * ``crm_config``: cluster-wide configuration options
  * ``nodes``: the machines that host the cluster
  * ``resources``: the services run by the cluster
  * ``constraints``: indications of how resources should be placed

* ``status``: This section contains the history of each resource on each node. Based on this data, the cluster can construct the complete current state of the cluster. The authoritative source for this section is the local executor (pacemaker-execd process) on each cluster node, and the cluster will occasionally repopulate the entire section. For this reason, it is never written to disk, and administrators are advised against modifying it in any way.
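The next paragraph draws a distinction between *properties* and *options*; a short sketch may make it concrete (the ``id`` strings and the choice of ``cluster-name`` here are illustrative only):

.. code-block:: xml

   <cib validate-with="pacemaker-3.2" epoch="4" num_updates="0" admin_epoch="0">
     <!-- CIB properties such as epoch are XML attributes of the cib element itself -->
     <configuration>
       <crm_config>
         <cluster_property_set id="cib-bootstrap-options">
           <!-- cluster options are nvpair children within crm_config -->
           <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="my-cluster"/>
         </cluster_property_set>
       </crm_config>
       <nodes/>
       <resources/>
       <constraints/>
     </configuration>
     <status/>
   </cib>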
In this document, configuration settings will be described as properties or options based on how they are defined in the CIB: * Properties are XML attributes of an XML element. * Options are name-value pairs expressed as ``nvpair`` child elements of an XML element. Normally, you will use command-line tools that abstract the XML, so the distinction will be unimportant; both properties and options are cluster settings you can tweak. CIB Properties ############## Certain settings are defined by CIB properties (that is, attributes of the ``cib`` tag) rather than with the rest of the cluster configuration in the ``configuration`` section. The reason is simply a matter of parsing. These options are used by the configuration database which is, by design, mostly ignorant of the content it holds. So the decision was made to place them in an easy-to-find location. .. table:: **CIB Properties** +------------------+-----------------------------------------------------------+ | Attribute | Description | +==================+===========================================================+ | admin_epoch | .. index:: | | | pair: admin_epoch; cib | | | | | | When a node joins the cluster, the cluster performs a | | | check to see which node has the best configuration. It | | | asks the node with the highest (``admin_epoch``, | | | ``epoch``, ``num_updates``) tuple to replace the | | | configuration on all the nodes -- which makes setting | | | them, and setting them correctly, very important. | | | ``admin_epoch`` is never modified by the cluster; you can | | | use this to make the configurations on any inactive nodes | | | obsolete. | | | | | | .. warning:: | | | Never set this value to zero. In such cases, the | | | cluster cannot tell the difference between your | | | configuration and the "empty" one used when nothing is | | | found on disk. | +------------------+-----------------------------------------------------------+ | epoch | .. index:: | | | pair: epoch; cib | | | | | | The cluster increments this every time the configuration | | | is updated (usually by the administrator). | +------------------+-----------------------------------------------------------+ | num_updates | .. index:: | | | pair: num_updates; cib | | | | | | The cluster increments this every time the configuration | | | or status is updated (usually by the cluster) and resets | | | it to 0 when epoch changes. | +------------------+-----------------------------------------------------------+ | validate-with | .. index:: | | | pair: validate-with; cib | | | | | | Determines the type of XML validation that will be done | | | on the configuration. If set to ``none``, the cluster | | | will not verify that updates conform to the DTD (nor | | | reject ones that don't). | +------------------+-----------------------------------------------------------+ | cib-last-written | .. index:: | | | pair: cib-last-written; cib | | | | | | Indicates when the configuration was last written to | | | disk. Maintained by the cluster; for informational | | | purposes only. | +------------------+-----------------------------------------------------------+ | have-quorum | .. index:: | | | pair: have-quorum; cib | | | | | | Indicates if the cluster has quorum. If false, this may | | | mean that the cluster cannot start resources or fence | | | other nodes (see ``no-quorum-policy`` below). Maintained | | | by the cluster. | +------------------+-----------------------------------------------------------+ | dc-uuid | .. 
index:: | | | pair: dc-uuid; cib | | | | | | Indicates which cluster node is the current leader. Used | | | by the cluster when placing resources and determining the | | | order of some events. Maintained by the cluster. | +------------------+-----------------------------------------------------------+ .. _cluster_options: Cluster Options ############### Cluster options, as you might expect, control how the cluster behaves when confronted with various situations. They are grouped into sets within the ``crm_config`` section. In advanced configurations, there may be more than one set. (This will be described later in the chapter on :ref:`rules` where we will show how to have the cluster use different sets of options during working hours than during weekends.) For now, we will describe the simple case where each option is present at most once. You can obtain an up-to-date list of cluster options, including their default values, by running the ``man pacemaker-schedulerd`` and ``man pacemaker-controld`` commands. .. table:: **Cluster Options** +---------------------------+---------+----------------------------------------------------+ | Option | Default | Description | +===========================+=========+====================================================+ | cluster-name | | .. index:: | | | | pair: cluster option; cluster-name | | | | | | | | An (optional) name for the cluster as a whole. | | | | This is mostly for users' convenience for use | | | | as desired in administration, but this can be | | | | used in the Pacemaker configuration in | | | | :ref:`rules` (as the ``#cluster-name`` | | | | :ref:`node attribute | | | | `. It may | | | | also be used by higher-level tools when | | | | displaying cluster information, and by | | | | certain resource agents (for example, the | | | | ``ocf:heartbeat:GFS2`` agent stores the | | | | cluster name in filesystem meta-data). | +---------------------------+---------+----------------------------------------------------+ | dc-version | | .. index:: | | | | pair: cluster option; dc-version | | | | | | | | Version of Pacemaker on the cluster's DC. | | | | Determined automatically by the cluster. Often | | | | includes the hash which identifies the exact | | | | Git changeset it was built from. Used for | | | | diagnostic purposes. | +---------------------------+---------+----------------------------------------------------+ | cluster-infrastructure | | .. index:: | | | | pair: cluster option; cluster-infrastructure | | | | | | | | The messaging stack on which Pacemaker is | | | | currently running. Determined automatically by | | | | the cluster. Used for informational and | | | | diagnostic purposes. | +---------------------------+---------+----------------------------------------------------+ | no-quorum-policy | stop | .. index:: | | | | pair: cluster option; no-quorum-policy | | | | | | | | What to do when the cluster does not have | | | | quorum. 
Allowed values: | | | | | | | | * ``ignore:`` continue all resource management | | | | * ``freeze:`` continue resource management, but | | | | don't recover resources from nodes not in the | | | | affected partition | | | | * ``stop:`` stop all resources in the affected | | | | cluster partition | | | | * ``demote:`` demote promotable resources and | | | | stop all other resources in the affected | - | | | cluster partition | + | | | cluster partition *(since 2.0.5)* | | | | * ``suicide:`` fence all nodes in the affected | | | | cluster partition | +---------------------------+---------+----------------------------------------------------+ | batch-limit | 0 | .. index:: | | | | pair: cluster option; batch-limit | | | | | | | | The maximum number of actions that the cluster | | | | may execute in parallel across all nodes. The | | | | "correct" value will depend on the speed and | | | | load of your network and cluster nodes. If zero, | | | | the cluster will impose a dynamically calculated | | | | limit only when any node has high load. | +---------------------------+---------+----------------------------------------------------+ | migration-limit | -1 | .. index:: | | | | pair: cluster option; migration-limit | | | | | | | | The number of | | | | :ref:`live migration ` actions | | | | that the cluster is allowed to execute in | | | | parallel on a node. A value of -1 means | | | | unlimited. | +---------------------------+---------+----------------------------------------------------+ | symmetric-cluster | true | .. index:: | | | | pair: cluster option; symmetric-cluster | | | | | | | | Whether resources can run on any node by default | | | | (if false, a resource is allowed to run on a | | | | node only if a | | | | :ref:`location constraint ` | | | | enables it) | +---------------------------+---------+----------------------------------------------------+ | stop-all-resources | false | .. index:: | | | | pair: cluster option; stop-all-resources | | | | | | | | Whether all resources should be disallowed from | | | | running (can be useful during maintenance) | +---------------------------+---------+----------------------------------------------------+ | stop-orphan-resources | true | .. index:: | | | | pair: cluster option; stop-orphan-resources | | | | | | | | Whether resources that have been deleted from | | | | the configuration should be stopped. This value | | | | takes precedence over ``is-managed`` (that is, | | | | even unmanaged resources will be stopped when | | | | orphaned if this value is ``true`` | +---------------------------+---------+----------------------------------------------------+ | stop-orphan-actions | true | .. index:: | | | | pair: cluster option; stop-orphan-actions | | | | | | | | Whether recurring :ref:`operations ` | | | | that have been deleted from the configuration | | | | should be cancelled | +---------------------------+---------+----------------------------------------------------+ | start-failure-is-fatal | true | .. index:: | | | | pair: cluster option; start-failure-is-fatal | | | | | | | | Whether a failure to start a resource on a | | | | particular node prevents further start attempts | | | | on that node? If ``false``, the cluster will | | | | decide whether the node is still eligible based | | | | on the resource's current failure count and | | | | :ref:`migration-threshold `. | +---------------------------+---------+----------------------------------------------------+ | enable-startup-probes | true | .. 
index:: | | | | pair: cluster option; enable-startup-probes | | | | | | | | Whether the cluster should check the | | | | pre-existing state of resources when the cluster | | | | starts | +---------------------------+---------+----------------------------------------------------+ | maintenance-mode | false | .. index:: | | | | pair: cluster option; maintenance-mode | | | | | | | | Whether the cluster should refrain from | | | | monitoring, starting and stopping resources | +---------------------------+---------+----------------------------------------------------+ | stonith-enabled | true | .. index:: | | | | pair: cluster option; stonith-enabled | | | | | | | | Whether the cluster is allowed to fence nodes | | | | (for example, failed nodes and nodes with | | | | resources that can't be stopped. | | | | | | | | If true, at least one fence device must be | | | | configured before resources are allowed to run. | | | | | | | | If false, unresponsive nodes are immediately | | | | assumed to be running no resources, and resource | | | | recovery on online nodes starts without any | | | | further protection (which can mean *data loss* | | | | if the unresponsive node still accesses shared | | | | storage, for example). See also the | | | | :ref:`requires ` resource | | | | meta-attribute. | +---------------------------+---------+----------------------------------------------------+ | stonith-action | reboot | .. index:: | | | | pair: cluster option; stonith-action | | | | | | | | Action the cluster should send to the fence agent | | | | when a node must be fenced. Allowed values are | | | | ``reboot``, ``off``, and (for legacy agents only) | | | | ``poweroff``. | +---------------------------+---------+----------------------------------------------------+ | stonith-timeout | 60s | .. index:: | | | | pair: cluster option; stonith-timeout | | | | | | | | How long to wait for ``on``, ``off``, and | | | | ``reboot`` fence actions to complete by default. | +---------------------------+---------+----------------------------------------------------+ | stonith-max-attempts | 10 | .. index:: | | | | pair: cluster option; stonith-max-attempts | | | | | | | | How many times fencing can fail for a target | | | | before the cluster will no longer immediately | | | | re-attempt it. | +---------------------------+---------+----------------------------------------------------+ | stonith-watchdog-timeout | 0 | .. index:: | | | | pair: cluster option; stonith-watchdog-timeout | | | | | | | | If nonzero, and the cluster detects | | | | ``have-watchdog`` as ``true``, then watchdog-based | | | | self-fencing will be performed via SBD when | | | | fencing is required, without requiring a fencing | | | | resource explicitly configured. | | | | | | | | If this is set to a positive value, unseen nodes | | | | are assumed to self-fence within this much time. | | | | | | | | .. warning:: | | | | It must be ensured that this value is larger | | | | than the ``SBD_WATCHDOG_TIMEOUT`` environment | | | | variable on all nodes. Pacemaker verifies the | | | | settings individually on all nodes and prevents | | | | startup or shuts down if configured wrongly on | | | | the fly. It is strongly recommended that | | | | ``SBD_WATCHDOG_TIMEOUT`` be set to the same | | | | value on all nodes. | | | | | | | | If this is set to a negative value, and | | | | ``SBD_WATCHDOG_TIMEOUT`` is set, twice that value | | | | will be used. | | | | | | | | .. 
warning:: | | | | In this case, it is essential (and currently | | | | not verified by pacemaker) that | | | | ``SBD_WATCHDOG_TIMEOUT`` is set to the same | | | | value on all nodes. | +---------------------------+---------+----------------------------------------------------+ | concurrent-fencing | false | .. index:: | | | | pair: cluster option; concurrent-fencing | | | | | | | | Whether the cluster is allowed to initiate multiple| | | | fence actions concurrently | +---------------------------+---------+----------------------------------------------------+ | fence-reaction | stop | .. index:: | | | | pair: cluster option; fence-reaction | | | | | | | | How should a cluster node react if notified of its | | | | own fencing? A cluster node may receive | | | | notification of its own fencing if fencing is | | | | misconfigured, or if fabric fencing is in use that | | | | doesn't cut cluster communication. Allowed values | | | | are ``stop`` to attempt to immediately stop | | | | pacemaker and stay stopped, or ``panic`` to | | | | attempt to immediately reboot the local node, | | | | falling back to stop on failure. The default is | | | | likely to be changed to ``panic`` in a future | | | | release. *(since 2.0.3)* | +---------------------------+---------+----------------------------------------------------+ | priority-fencing-delay | 0 | .. index:: | | | | pair: cluster option; priority-fencing-delay | | | | | | | | Apply this delay to any fencing targeting the lost | | | | nodes with the highest total resource priority in | | | | case we don't have the majority of the nodes in | | | | our cluster partition, so that the more | | | | significant nodes potentially win any fencing | | | | match (especially meaningful in a split-brain of a | | | | 2-node cluster). A promoted resource instance | | | | takes the resource's priority plus 1 if the | | | | resource's priority is not 0. Any static or random | | | | delays introduced by ``pcmk_delay_base`` and | | | | ``pcmk_delay_max`` configured for the | | | | corresponding fencing resources will be added to | | | | this delay. This delay should be significantly | | | | greater than (safely twice) the maximum delay from | | | | those parameters. *(since 2.0.4)* | +---------------------------+---------+----------------------------------------------------+ | cluster-delay | 60s | .. index:: | | | | pair: cluster option; cluster-delay | | | | | | | | Estimated maximum round-trip delay over the | | | | network (excluding action execution). If the DC | | | | requires an action to be executed on another node, | | | | it will consider the action failed if it does not | | | | get a response from the other node in this time | | | | (after considering the action's own timeout). The | | | | "correct" value will depend on the speed and load | | | | of your network and cluster nodes. | +---------------------------+---------+----------------------------------------------------+ | dc-deadtime | 20s | .. index:: | | | | pair: cluster option; dc-deadtime | | | | | | | | How long to wait for a response from other nodes | | | | during startup. The "correct" value will depend on | | | | the speed/load of your network and the type of | | | | switches used. | +---------------------------+---------+----------------------------------------------------+ | cluster-ipc-limit | 500 | .. index:: | | | | pair: cluster option; cluster-ipc-limit | | | | | | | | The maximum IPC message backlog before one cluster | | | | daemon will disconnect another. 
This is of use in | | | | large clusters, for which a good value is the | | | | number of resources in the cluster multiplied by | | | | the number of nodes. The default of 500 is also | | | | the minimum. Raise this if you see | | | | "Evicting client" messages for cluster daemon PIDs | | | | in the logs. | +---------------------------+---------+----------------------------------------------------+ | pe-error-series-max | -1 | .. index:: | | | | pair: cluster option; pe-error-series-max | | | | | | | | The number of scheduler inputs resulting in errors | | | | to save. Used when reporting problems. A value of | | | | -1 means unlimited (report all). | +---------------------------+---------+----------------------------------------------------+ | pe-warn-series-max | -1 | .. index:: | | | | pair: cluster option; pe-warn-series-max | | | | | | | | The number of scheduler inputs resulting in | | | | warnings to save. Used when reporting problems. A | | | | value of -1 means unlimited (report all). | +---------------------------+---------+----------------------------------------------------+ | pe-input-series-max | -1 | .. index:: | | | | pair: cluster option; pe-input-series-max | | | | | | | | The number of "normal" scheduler inputs to save. | | | | Used when reporting problems. A value of -1 means | | | | unlimited (report all). | +---------------------------+---------+----------------------------------------------------+ | enable-acl | false | .. index:: | | | | pair: cluster option; enable-acl | | | | | | | | Whether :ref:`acl` should be used to authorize | | | | modifications to the CIB | +---------------------------+---------+----------------------------------------------------+ | placement-strategy | default | .. index:: | | | | pair: cluster option; placement-strategy | | | | | | | | How the cluster should allocate resources to nodes | | | | (see :ref:`utilization`). Allowed values are | | | | ``default``, ``utilization``, ``balanced``, and | | | | ``minimal``. | +---------------------------+---------+----------------------------------------------------+ | node-health-strategy | none | .. index:: | | | | pair: cluster option; node-health-strategy | | | | | | | | How the cluster should react to node health | | | | attributes (see :ref:`node-health`). Allowed values| | | | are ``none``, ``migrate-on-red``, ``only-green``, | | | | ``progressive``, and ``custom``. | +---------------------------+---------+----------------------------------------------------+ | node-health-base | 0 | .. index:: | | | | pair: cluster option; node-health-base | | | | | | | | The base health score assigned to a node. Only | | | | used when ``node-health-strategy`` is | | | | ``progressive``. | +---------------------------+---------+----------------------------------------------------+ | node-health-green | 0 | .. index:: | | | | pair: cluster option; node-health-green | | | | | | | | The score to use for a node health attribute whose | | | | value is ``green``. Only used when | | | | ``node-health-strategy`` is ``progressive`` or | | | | ``custom``. | +---------------------------+---------+----------------------------------------------------+ | node-health-yellow | 0 | .. index:: | | | | pair: cluster option; node-health-yellow | | | | | | | | The score to use for a node health attribute whose | | | | value is ``yellow``. Only used when | | | | ``node-health-strategy`` is ``progressive`` or | | | | ``custom``. 
| +---------------------------+---------+----------------------------------------------------+ | node-health-red | 0 | .. index:: | | | | pair: cluster option; node-health-red | | | | | | | | The score to use for a node health attribute whose | | | | value is ``red``. Only used when | | | | ``node-health-strategy`` is ``progressive`` or | | | | ``custom``. | +---------------------------+---------+----------------------------------------------------+ | cluster-recheck-interval | 15min | .. index:: | | | | pair: cluster option; cluster-recheck-interval | | | | | | | | Pacemaker is primarily event-driven, and looks | | | | ahead to know when to recheck the cluster for | - | | | failure timeouts and most time-based rules. | - | | | However, it will also recheck the cluster after | - | | | this amount of inactivity. This has two goals: | - | | | rules with ``date_spec`` are only guaranteed to be | - | | | checked this often, and it also serves as a | - | | | fail-safe for certain classes of scheduler bugs. A | - | | | value of 0 disables this polling; positive values | - | | | are a time interval. | + | | | failure timeouts and most time-based rules | + | | | *(since 2.0.3)*. However, it will also recheck the | + | | | cluster after this amount of inactivity. This has | + | | | two goals: rules with ``date_spec`` are only | + | | | guaranteed to be checked this often, and it also | + | | | serves as a fail-safe for some kinds of scheduler | + | | | bugs. A value of 0 disables this polling; positive | + | | | values are a time interval. | +---------------------------+---------+----------------------------------------------------+ | shutdown-lock | false | .. index:: | | | | pair: cluster option; shutdown-lock | | | | | | | | The default of false allows active resources to be | | | | recovered elsewhere when their node is cleanly | | | | shut down, which is what the vast majority of | | | | users will want. However, some users prefer to | | | | make resources highly available only for failures, | | | | with no recovery for clean shutdowns. If this | | | | option is true, resources active on a node when it | | | | is cleanly shut down are kept "locked" to that | | | | node (not allowed to run elsewhere) until they | | | | start again on that node after it rejoins (or for | | | | at most ``shutdown-lock-limit``, if set). Stonith | | | | resources and Pacemaker Remote connections are | | | | never locked. Clone and bundle instances and the | | | | master role of promotable clones are currently | | | | never locked, though support could be added in a | | | | future release. Locks may be manually cleared | | | | using the ``--refresh`` option of ``crm_resource`` | | | | (both the resource and node must be specified; | | | | this works with remote nodes if their connection | | | | resource's ``target-role`` is set to ``Stopped``, | | | | but not if Pacemaker Remote is stopped on the | | | | remote node without disabling the connection | | | | resource). *(since 2.0.4)* | +---------------------------+---------+----------------------------------------------------+ | shutdown-lock-limit | 0 | .. index:: | | | | pair: cluster option; shutdown-lock-limit | | | | | | | | If ``shutdown-lock`` is true, and this is set to a | | | | nonzero time duration, locked resources will be | | | | allowed to start after this much time has passed | | | | since the node shutdown was initiated, even if the | | | | node has not rejoined. 
(This works with remote | | | | nodes only if their connection resource's | | | | ``target-role`` is set to ``Stopped``.) | | | | *(since 2.0.4)* | +---------------------------+---------+----------------------------------------------------+ | remove-after-stop | false | .. index:: | | | | pair: cluster option; remove-after-stop | | | | | | | | *Advanced Use Only:* Should the cluster remove | | | | resources from Pacemaker's executor after they are | | | | stopped? Values other than the default are, at | | | | best, poorly tested and potentially dangerous. | +---------------------------+---------+----------------------------------------------------+ | startup-fencing | true | .. index:: | | | | pair: cluster option; startup-fencing | | | | | | | | *Advanced Use Only:* Should the cluster fence | | | | unseen nodes at start-up? Setting this to false is | | | | unsafe, because the unseen nodes could be active | | | | and running resources but unreachable. | +---------------------------+---------+----------------------------------------------------+ | election-timeout | 2min | .. index:: | | | | pair: cluster option; election-timeout | | | | | | | | *Advanced Use Only:* If you need to adjust this | | | | value, it probably indicates the presence of a bug.| +---------------------------+---------+----------------------------------------------------+ | shutdown-escalation | 20min | .. index:: | | | | pair: cluster option; shutdown-escalation | | | | | | | | *Advanced Use Only:* If you need to adjust this | | | | value, it probably indicates the presence of a bug.| +---------------------------+---------+----------------------------------------------------+ | join-integration-timeout | 3min | .. index:: | | | | pair: cluster option; join-integration-timeout | | | | | | | | *Advanced Use Only:* If you need to adjust this | | | | value, it probably indicates the presence of a bug.| +---------------------------+---------+----------------------------------------------------+ | join-finalization-timeout | 30min | .. index:: | | | | pair: cluster option; join-finalization-timeout | | | | | | | | *Advanced Use Only:* If you need to adjust this | | | | value, it probably indicates the presence of a bug.| +---------------------------+---------+----------------------------------------------------+ | transition-delay | 0s | .. index:: | | | | pair: cluster option; transition-delay | | | | | | | | *Advanced Use Only:* Delay cluster recovery for | | | | the configured interval to allow for additional or | | | | related events to occur. This can be useful if | | | | your configuration is sensitive to the order in | | | | which ping updates arrive. Enabling this option | | | | will slow down cluster recovery under all | | | | conditions. | +---------------------------+---------+----------------------------------------------------+ diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst index 7e89b4a7d6..51ba1251ec 100644 --- a/doc/sphinx/Pacemaker_Explained/resources.rst +++ b/doc/sphinx/Pacemaker_Explained/resources.rst @@ -1,984 +1,984 @@ .. _resource: Cluster Resources ----------------- .. Convert_to_RST: [[s-resource-primitive]] == What is a Cluster Resource? == indexterm:[Resource] A resource is a service made highly available by a cluster. The simplest type of resource, a 'primitive' resource, is described in this chapter. More complex forms, such as groups and clones, are described in later chapters. Every primitive resource has a 'resource agent'. 
A resource agent is an external program that abstracts the service it provides and presents a consistent view to the cluster. This allows the cluster to be agnostic about the resources it manages. The cluster doesn't need to understand how the resource works because it relies on the resource agent to do the right thing when given a `start`, `stop` or `monitor` command. For this reason, it is crucial that resource agents are well-tested. Typically, resource agents come in the form of shell scripts. However, they can be written using any technology (such as C, Python or Perl) that the author is comfortable with.

[[s-resource-supported]]
== Resource Classes ==
indexterm:[Resource,class]

Pacemaker supports several classes of agents:

* OCF
* LSB
* Upstart
* Systemd
* Service
* Fencing
* Nagios Plugins

=== Open Cluster Framework ===
indexterm:[Resource,OCF] indexterm:[OCF,Resources] indexterm:[Open Cluster Framework,Resources]

The OCF standard footnote:[See https://github.com/ClusterLabs/OCF-spec/tree/master/ra . The Pacemaker implementation has been somewhat extended from the OCF specs.] is basically an extension of the Linux Standard Base conventions for init scripts to:

* support parameters,
* make them self-describing, and
* make them extensible

OCF specs have strict definitions of the exit codes that actions must return. footnote:[ The resource-agents source code includes the `ocf-tester` script, which can be useful in this regard. ] The cluster follows these specifications exactly, and giving the wrong exit code will cause the cluster to behave in ways you will likely find puzzling and annoying. In particular, the cluster needs to distinguish a completely stopped resource from one which is in some erroneous and indeterminate state.

Parameters are passed to the resource agent as environment variables, with the special prefix +OCF_RESKEY_+. So, a parameter which the user thinks of as +ip+ will be passed to the resource agent as +OCF_RESKEY_ip+. The number and purpose of the parameters are left to the resource agent; however, the resource agent should use the `meta-data` command to advertise any that it supports.

The OCF class is the most preferred as it is an industry standard, highly flexible (allowing parameters to be passed to agents in a non-positional manner) and self-describing.

For more information, see the http://www.linux-ha.org/wiki/OCF_Resource_Agents[reference] and the 'Resource Agents' chapter of 'Pacemaker Administration'.

=== Linux Standard Base ===
indexterm:[Resource,LSB] indexterm:[LSB,Resources] indexterm:[Linux Standard Base,Resources]

'LSB' resource agents are more commonly known as 'init scripts'. If a full path is not given, they are assumed to be located in +/etc/init.d+. Commonly, they are provided by the OS distribution. In order to be used with a Pacemaker cluster, they must conform to the LSB specification. footnote:[ See http://refspecs.linux-foundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html for the LSB Spec as it relates to init scripts. ]

[WARNING]
====
Many distributions or particular software packages claim LSB compliance but ship with broken init scripts. For details on how to check whether your init script is LSB-compatible, see the 'Resource Agents' chapter of 'Pacemaker Administration'. Common problematic violations of the LSB standard include:

* Not implementing the +status+ operation at all
* Not observing the correct exit status codes for +start+/+stop+/+status+ actions
* Starting a started resource returns an error
* Stopping a stopped resource returns an error
====

[IMPORTANT]
====
Remember to make sure the computer is _not_ configured to start any services at boot time -- that should be controlled by the cluster.
====

[[s-resource-supported-systemd]]
=== Systemd ===
indexterm:[Resource,Systemd] indexterm:[Systemd,Resources]

Some newer distributions have replaced the old http://en.wikipedia.org/wiki/Init#SysV-style["SysV"] style of initialization daemons and scripts with an alternative called http://www.freedesktop.org/wiki/Software/systemd[Systemd]. Pacemaker is able to manage these services _if they are present_.

Instead of init scripts, systemd has 'unit files'. Generally, the services (unit files) are provided by the OS distribution, but there are online guides for converting from init scripts. footnote:[For example, http://0pointer.de/blog/projects/systemd-for-admins-3.html]

[IMPORTANT]
====
Remember to make sure the computer is _not_ configured to start any services at boot time -- that should be controlled by the cluster.
====

=== Upstart ===
indexterm:[Resource,Upstart] indexterm:[Upstart,Resources]

Some newer distributions have replaced the old http://en.wikipedia.org/wiki/Init#SysV-style["SysV"] style of initialization daemons (and scripts) with an alternative called http://upstart.ubuntu.com/[Upstart]. Pacemaker is able to manage these services _if they are present_.

Instead of init scripts, upstart has 'jobs'. Generally, the services (jobs) are provided by the OS distribution.

[IMPORTANT]
====
Remember to make sure the computer is _not_ configured to start any services at boot time -- that should be controlled by the cluster.
====

=== System Services ===
indexterm:[Resource,System Services] indexterm:[System Service,Resources]

Since there are various types of system services (+systemd+, +upstart+, and +lsb+), Pacemaker supports a special +service+ alias which intelligently figures out which one applies to a given cluster node. This is particularly useful when the cluster contains a mix of +systemd+, +upstart+, and +lsb+.

In order, Pacemaker will try to find the named service as:

. an LSB init script
. a Systemd unit file
. an Upstart job

=== STONITH ===
indexterm:[Resource,STONITH] indexterm:[STONITH,Resources]

The STONITH class is used exclusively for fencing-related resources. This is discussed later in <>.

=== Nagios Plugins ===
indexterm:[Resource,Nagios Plugins] indexterm:[Nagios Plugins,Resources]

Nagios Plugins footnote:[The project has two independent forks, hosted at https://www.nagios-plugins.org/ and https://www.monitoring-plugins.org/. Output from both projects' plugins is similar, so plugins from either project can be used with pacemaker.] allow us to monitor services on remote hosts. Pacemaker is able to do remote monitoring with the plugins _if they are present_.

A common use case is to configure them as resources belonging to a resource container (usually a virtual machine), and the container will be restarted if any of them has failed. Another use is to configure them as ordinary resources to be used for monitoring hosts or services via the network.

The supported parameters are the same as the long options of the plugin.

.. _primitive-resource:

Resource Properties
###################

..
Convert_to_RST: These values tell the cluster which resource agent to use for the resource, where to find that resource agent and what standards it conforms to. .Properties of a Primitive Resource [width="95%",cols="1m,<6",options="header",align="center"] |========================================================= |Field |Description |id |Your name for the resource indexterm:[id,Resource] indexterm:[Resource,Property,id] |class |The standard the resource agent conforms to. Allowed values: +lsb+, +nagios+, +ocf+, +service+, +stonith+, +systemd+, +upstart+ indexterm:[class,Resource] indexterm:[Resource,Property,class] |type |The name of the Resource Agent you wish to use. E.g. +IPaddr+ or +Filesystem+ indexterm:[type,Resource] indexterm:[Resource,Property,type] |provider |The OCF spec allows multiple vendors to supply the same resource agent. To use the OCF resource agents supplied by the Heartbeat project, you would specify +heartbeat+ here. indexterm:[provider,Resource] indexterm:[Resource,Property,provider] |========================================================= The XML definition of a resource can be queried with the `crm_resource` tool. For example: ---- # crm_resource --resource Email --query-xml ---- might produce: .A system resource definition ===== [source,XML] ===== [NOTE] ===== One of the main drawbacks to system services (LSB, systemd or Upstart) resources is that they do not allow any parameters! ===== //// See https://tools.ietf.org/html/rfc5737 for choice of example IP address //// .An OCF resource definition ===== [source,XML] ------- ------- ===== .. _resource_options: Resource Options ################ .. Convert_to_RST_2: Resources have two types of options: 'meta-attributes' and 'instance attributes'. Meta-attributes apply to any type of resource, while instance attributes are specific to each resource agent. === Resource Meta-Attributes === Meta-attributes are used by the cluster to decide how a resource should behave and can be easily set using the `--meta` option of the `crm_resource` command. .Meta-attributes of a Primitive Resource [width="95%",cols="2m,2,<5",options="header",align="center"] |========================================================= |Field |Default |Description |priority |0 |If not all resources can be active, the cluster will stop lower priority resources in order to keep higher priority ones active. indexterm:[priority,Resource Option] indexterm:[Resource,Option,priority] |target-role |Started a|What state should the cluster attempt to keep this resource in? Allowed values: * +Stopped:+ Force the resource to be stopped * +Started:+ Allow the resource to be started (and in the case of <>, promoted to master if appropriate) * +Slave:+ Allow the resource to be started, but only in Slave mode if the resource is <> * +Master:+ Equivalent to +Started+ indexterm:[target-role,Resource Option] indexterm:[Resource,Option,target-role] |is-managed |TRUE |Is the cluster allowed to start and stop the resource? Allowed values: +true+, +false+ indexterm:[is-managed,Resource Option] indexterm:[Resource,Option,is-managed] |maintenance |FALSE |Similar to the +maintenance-mode+ <>, but for a single resource. If true, the resource will not be started, stopped, or monitored on any node. This differs from +is-managed+ in that monitors will not be run. Allowed values: +true+, +false+ indexterm:[maintenance,Resource Option] indexterm:[Resource,Option,maintenance] .. _resource-stickiness: placeholder .. 
Convert_to_RST_3: |resource-stickiness |1 for individual clone instances, 0 for all other resources |A score that will be added to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. indexterm:[resource-stickiness,Resource Option] indexterm:[Resource,Option,resource-stickiness] .. _requires: placeholder .. Convert_to_RST_4: |requires |+quorum+ for resources with a +class+ of +stonith+, otherwise +unfencing+ if unfencing is active in the cluster, otherwise +fencing+ if +stonith-enabled+ is true, otherwise +quorum+ a|Conditions under which the resource can be started. Allowed values: * +nothing:+ The resource can always be started * +quorum:+ The cluster can only start this resource if a majority of the configured nodes are active * +fencing:+ The cluster can only start this resource if a majority of the configured nodes are active _and_ any failed or unknown nodes have been <> * +unfencing:+ The cluster can only start this resource if a majority of the configured nodes are active _and_ any failed or unknown nodes have been fenced _and_ only on nodes that have been <> indexterm:[requires,Resource Option] indexterm:[Resource,Option,requires] |migration-threshold |INFINITY |How many failures may occur for this resource on a node before the node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible); by contrast, the cluster treats INFINITY (the default) as a very large but finite number. This option has an effect only if the failed operation specifies +on-fail+ as +restart+ (the default), and additionally for failed +start+ operations, if the cluster property +start-failure-is-fatal+ is +false+. indexterm:[migration-threshold,Resource Option] indexterm:[Resource,Option,migration-threshold] |failure-timeout |0 |How many seconds to wait before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. indexterm:[failure-timeout,Resource Option] indexterm:[Resource,Option,failure-timeout] |multiple-active |stop_start a|What should the cluster do if it ever finds the resource active on more than one node? Allowed values: * +block:+ mark the resource as unmanaged * +stop_only:+ stop all active instances and leave them that way * +stop_start:+ stop all active instances and start the resource in one location only indexterm:[multiple-active,Resource Option] indexterm:[Resource,Option,multiple-active] |allow-migrate |TRUE for ocf:pacemaker:remote resources, FALSE otherwise |Whether the cluster should try to "live migrate" this resource when it needs to be moved (see <>) |container-attribute-target | |Specific to bundle resources; see <> |remote-node | |The name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. +WARNING:+ This value cannot overlap with any resource or node IDs. |remote-port |3121 |If +remote-node+ is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. |remote-addr |value of +remote-node+ |If +remote-node+ is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. |remote-connect-timeout |60s |If +remote-node+ is specified, how long before a pending guest connection will time out. |========================================================= As an example of setting resource options, if you performed the following commands on an LSB Email resource: ------- # crm_resource --meta --resource Email --set-parameter priority --parameter-value 100 # crm_resource -m -r Email -p multiple-active -v block ------- the resulting resource definition might be: .An LSB resource with cluster options ===== [source,XML] ------- ------- =====
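A minimal sketch of roughly what that definition could look like (the +exim+ agent name and all ids here are illustrative assumptions, not output copied from a live cluster):
[source,XML]
-------
<primitive id="Email" class="lsb" type="exim">
  <meta_attributes id="Email-meta_attributes">
    <nvpair id="Email-meta_attributes-priority" name="priority" value="100"/>
    <nvpair id="Email-meta_attributes-multiple-active" name="multiple-active" value="block"/>
  </meta_attributes>
</primitive>
-------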
In addition to the cluster-defined meta-attributes described above, you may also configure arbitrary meta-attributes of your own choosing. Most commonly, this would be done for use in <>. For example, an IT department might define a custom meta-attribute to indicate which company department each resource is intended for. To reduce the chance of name collisions with cluster-defined meta-attributes added in the future, it is recommended to use a unique, organization-specific prefix for such attributes. [[s-resource-defaults]] === Setting Global Defaults for Resource Meta-Attributes === To set a default value for a resource option, add it to the +rsc_defaults+ section with `crm_attribute`. For example, ---- # crm_attribute --type rsc_defaults --name is-managed --update false ---- would prevent the cluster from starting or stopping any of the resources in the configuration (unless, of course, the individual resources were specifically enabled by having their +is-managed+ set to +true+).
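In CIB terms, that command adds an +nvpair+ to the +rsc_defaults+ section, along the lines of this sketch (the +meta_attributes+ id shown is illustrative; the tool generates one for you):
[source,XML]
-------
<rsc_defaults>
  <meta_attributes id="rsc-options">
    <nvpair id="rsc-options-is-managed" name="is-managed" value="false"/>
  </meta_attributes>
</rsc_defaults>
-------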
=== Resource Instance Attributes === The resource agents of some resource classes (lsb, systemd and upstart 'not' among them) can be given parameters which determine how they behave and which instance of a service they control. If your resource agent supports parameters, you can add them with the `crm_resource` command. For example, ---- # crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2 ---- would create an entry in the resource like this: .An example OCF resource with instance attributes ===== [source,XML] ------- ------- ===== For an OCF resource, the result would be an environment variable called +OCF_RESKEY_ip+ with a value of +192.0.2.2+. The list of instance attributes supported by an OCF resource agent can be found by calling the resource agent with the `meta-data` command. The output contains an XML description of all the supported attributes, their purpose, and their default values. .Displaying the metadata for the Dummy resource agent template ===== ---- # export OCF_ROOT=/usr/lib/ocf # $OCF_ROOT/resource.d/pacemaker/Dummy meta-data ---- [source,XML] ------- 1.0 This is a Dummy Resource Agent. It does absolutely nothing except keep track of whether it's running or not. Its purpose in life is for testing and to serve as a template for RA writers. NB: Please pay attention to the timeouts specified in the actions section below. They should be meaningful for the kind of resource the agent manages. They should be the minimum advised timeouts, but they shouldn't/cannot cover _all_ possible resource instances. So, try to be neither overly generous nor too stingy, but moderate. The minimum timeouts should never be below 10 seconds. Example stateless resource agent Location to store the resource state in. State file Fake attribute that can be changed to cause a reload Fake attribute that can be changed to cause a reload Number of seconds to sleep during operations. This can be used to test how the cluster reacts to operation timeouts. Operation sleep duration in seconds. ------- ===== .. _operation: Resource Operations ################### .. Convert_to_RST_5: indexterm:[Resource,Action] 'Operations' are actions the cluster can perform on a resource by calling the resource agent. Resource agents must support certain common operations such as start, stop, and monitor, and may implement any others. Operations may be explicitly configured for two purposes: to override defaults for options (such as timeout) that the cluster will use whenever it initiates the operation, and to run an operation on a recurring basis (for example, to monitor the resource for failure). .An OCF resource with a non-default start timeout ===== [source,XML] ------- ------- ===== Pacemaker identifies operations by a combination of name and interval, so this combination must be unique for each resource. That is, you should not configure two operations for the same resource with the same name and interval. .. _operation_properties: Operation Properties ____________________ .. Convert_to_RST_6: Operation properties may be specified directly in the +op+ element as XML attributes, or in a separate +meta_attributes+ block as +nvpair+ elements. XML attributes take precedence over +nvpair+ elements if both are specified. .Properties of an Operation [width="95%",cols="2m,3,<6",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the operation. indexterm:[id,Action Property] indexterm:[Action,Property,id] |name | |The action to perform. This can be any action supported by the agent; common values include +monitor+, +start+, and +stop+. indexterm:[name,Action Property] indexterm:[Action,Property,name] |interval |0 |How frequently (in seconds) to perform the operation. A value of 0 means "when needed". A positive value defines a 'recurring action', which is typically used with <>. indexterm:[interval,Action Property] indexterm:[Action,Property,interval] |timeout | |How long to wait before declaring the action has failed indexterm:[timeout,Action Property] indexterm:[Action,Property,timeout] |on-fail a|Varies by action: * +stop+: +fence+ if +stonith-enabled+ is true or +block+ otherwise * +demote+: +on-fail+ of the +monitor+ action with +role+ set to +Master+, if present, enabled, and configured to a value other than +demote+, or +restart+ otherwise * all other actions: +restart+ a|The action to take if this action ever fails. Allowed values: * +ignore:+ Pretend the resource did not fail. * +block:+ Don't perform any further operations on the resource. * +stop:+ Stop the resource and do not start it elsewhere. * +demote:+ Demote the resource, without a full restart. This is valid only for +promote+ actions, and for +monitor+ actions with both a nonzero +interval+ and +role+ set to +Master+; for any other action, a configuration error will - be logged, and the default behavior will be used. + be logged, and the default behavior will be used. *(since 2.0.5)* * +restart:+ Stop the resource and start it again (possibly on a different node). * +fence:+ STONITH the node on which the resource failed. * +standby:+ Move _all_ resources away from the node on which the resource failed. indexterm:[on-fail,Action Property] indexterm:[Action,Property,on-fail] |enabled |TRUE |If +false+, ignore this operation definition. This is typically used to pause a particular recurring +monitor+ operation; for instance, it can complement the respective resource being unmanaged (+is-managed=false+), as this alone will <>. Disabling the operation does not suppress all actions of the given type. Allowed values: +true+, +false+. indexterm:[enabled,Action Property] indexterm:[Action,Property,enabled] |record-pending |TRUE |If +true+, the intention to perform the operation is recorded so that GUIs and CLI tools can indicate that an operation is in progress. This is best set as an _operation default_ (see <>). Allowed values: +true+, +false+. indexterm:[record-pending,Action Property] indexterm:[Action,Property,record-pending] |role | |Run the operation only on node(s) that the cluster thinks should be in the specified role. This only makes sense for recurring +monitor+ operations. Allowed (case-sensitive) values: +Stopped+, +Started+, and in the case of <>, +Slave+ and +Master+. indexterm:[role,Action Property] indexterm:[Action,Property,role] |=========================================================
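To make these properties concrete, here is a minimal sketch of an explicitly configured recurring monitor (the id and values are illustrative):
[source,XML]
-------
<op id="Public-IP-monitor" name="monitor" interval="10s" timeout="30s" on-fail="restart" enabled="true"/>
-------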
[NOTE] ==== When +on-fail+ is set to +demote+, recovery from failure by a successful demote causes the cluster to recalculate whether and where a new instance should be promoted. The node with the failure is eligible, so if master scores have not changed, it will be promoted again. There is no direct equivalent of +migration-threshold+ for the master role, but the same effect can be achieved with a location constraint using a <> with a node attribute expression for the resource's fail count. For example, to immediately ban the master role from a node with any failed promote or master monitor: [source,XML] ---- ---- This example assumes that there is a promotable clone of the +my_primitive+ resource (note that the primitive name, not the clone name, is used in the rule), and that there is a recurring 10-second-interval monitor configured for the master role (fail count attributes specify the interval in milliseconds). ==== [[s-resource-monitoring]] === Monitoring Resources for Failure === When Pacemaker first starts a resource, it runs one-time +monitor+ operations (referred to as 'probes') to ensure the resource is running where it's supposed to be, and not running where it's not supposed to be. (This behavior can be affected by the +resource-discovery+ location constraint property.) Other than those initial probes, Pacemaker will 'not' (by default) check that the resource continues to stay healthy. footnote:[Currently, anyway. Automatic monitoring operations may be added in a future version of Pacemaker.] You must configure +monitor+ operations explicitly to perform these checks. .An OCF resource with a recurring health check ===== [source,XML] ------- ------- ===== By default, a +monitor+ operation will ensure that the resource is running where it is supposed to be. The +target-role+ property can be used for further checking. For example, if a resource has one +monitor+ operation with +interval=10 role=Started+ and a second +monitor+ operation with +interval=11 role=Stopped+, the cluster will run the first monitor on any nodes it thinks 'should' be running the resource, and the second monitor on any nodes that it thinks 'should not' be running the resource (for the truly paranoid, who want to know when an administrator manually starts a service by mistake).
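A sketch of such a pair of monitors (ids are illustrative; note the differing intervals, since name plus interval must be unique per resource):
[source,XML]
-------
<primitive id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr2">
  <operations>
    <op id="Public-IP-monitor-started" name="monitor" interval="10s" role="Started"/>
    <op id="Public-IP-monitor-stopped" name="monitor" interval="11s" role="Stopped"/>
  </operations>
</primitive>
-------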
[NOTE] ==== Currently, monitors with +role=Stopped+ are not implemented for <> resources. ==== [[s-monitoring-unmanaged]] === Monitoring Resources When Administration is Disabled === Recurring +monitor+ operations behave differently under various administrative settings: * When a resource is unmanaged (by setting +is-managed=false+): No monitors will be stopped. + If the unmanaged resource is stopped on a node where the cluster thinks it should be running, the cluster will detect and report that it is not, but it will not consider the monitor failed, and will not try to start the resource until it is managed again. + Starting the unmanaged resource on a different node is strongly discouraged and will at least cause the cluster to consider the resource failed, and may require the resource's +target-role+ to be set to +Stopped+ then +Started+ to be recovered. * When a node is put into standby: All resources will be moved away from the node, and all +monitor+ operations will be stopped on the node, except those specifying +role+ as +Stopped+ (which will be newly initiated if appropriate). * When the cluster is put into maintenance mode: All resources will be marked as unmanaged. All monitor operations will be stopped, except those specifying +role+ as +Stopped+ (which will be newly initiated if appropriate). As with single unmanaged resources, starting a resource on a node other than where the cluster expects it to be will cause problems. [[s-operation-defaults]] === Setting Global Defaults for Operations === You can change the global default values for operation properties in a given cluster. These are defined in an +op_defaults+ section of the CIB's +configuration+ section, and can be set with `crm_attribute`. For example, ---- # crm_attribute --type op_defaults --name timeout --update 20s ---- would default each operation's +timeout+ to 20 seconds. If an operation's definition also includes a value for +timeout+, then that value would be used for that operation instead. === When Implicit Operations Take a Long Time === The cluster will always perform a number of implicit operations: +start+, +stop+, and a non-recurring +monitor+ operation used at startup to check whether the resource is already active. If one of these is taking too long, then you can create an entry for it and specify a longer timeout. .An OCF resource with custom timeouts for its implicit actions ===== [source,XML] ------- ------- ===== === Multiple Monitor Operations === Provided no two operations (for a single resource) have the same name and interval, you can have as many +monitor+ operations as you like. In this way, you can do a superficial health check every minute and progressively more intense ones at higher intervals. To tell the resource agent what kind of check to perform, you need to provide each monitor with a different value for a common parameter. The OCF standard creates a special parameter called +OCF_CHECK_LEVEL+ for this purpose and dictates that it is "made available to the resource agent without the normal +OCF_RESKEY+ prefix". Whatever name you choose, you can specify it by adding an +instance_attributes+ block to the +op+ tag. It is up to each resource agent to look for the parameter and decide how to use it. .An OCF resource with two recurring health checks, performing different levels of checks specified via +OCF_CHECK_LEVEL+. ===== [source,XML] ------- ------- =====
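A sketch of what such a configuration could look like, assuming the agent understands +OCF_CHECK_LEVEL+ (the ids, intervals, and check-level values are illustrative):
[source,XML]
-------
<operations>
  <op id="health-60" name="monitor" interval="60s">
    <instance_attributes id="params-health-60">
      <nvpair id="health-60-check-level" name="OCF_CHECK_LEVEL" value="10"/>
    </instance_attributes>
  </op>
  <op id="health-300" name="monitor" interval="300s">
    <instance_attributes id="params-health-300">
      <nvpair id="health-300-check-level" name="OCF_CHECK_LEVEL" value="20"/>
    </instance_attributes>
  </op>
</operations>
-------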
=== Disabling a Monitor Operation === The easiest way to stop a recurring monitor is to just delete it. However, there can be times when you only want to disable it temporarily. In such cases, simply add +enabled=false+ to the operation's definition. .Example of an OCF resource with a disabled health check ===== [source,XML] ------- ------- ===== This can be achieved from the command line by executing: ---- # cibadmin --modify --xml-text '' ---- Once you've done whatever you needed to do, you can then re-enable it with: ---- # cibadmin --modify --xml-text '' ---- diff --git a/doc/sphinx/Pacemaker_Explained/rules.rst b/doc/sphinx/Pacemaker_Explained/rules.rst index b897fffa1a..fde2663cfa 100644 --- a/doc/sphinx/Pacemaker_Explained/rules.rst +++ b/doc/sphinx/Pacemaker_Explained/rules.rst @@ -1,948 +1,949 @@ .. index:: single: rule .. _rules: Rules ----- Rules can be used to make your configuration more dynamic, allowing values to change depending on the time or the value of a node attribute. Examples of things rules are useful for: * Set a higher value for :ref:`resource-stickiness ` during working hours, to minimize downtime, and a lower value on weekends, to allow resources to move to their most preferred locations when people aren't around to notice. * Automatically place the cluster into maintenance mode during a scheduled maintenance window. * Assign certain nodes and resources to a particular department via custom node attributes and meta-attributes, and add a single location constraint that restricts the department's resources to run only on those nodes. Each constraint type or property set that supports rules may contain one or more ``rule`` elements specifying conditions under which the constraint or properties take effect. Examples later in this chapter will make this clearer. .. index:: pair: XML element; rule Rule Properties ############### .. table:: **Attributes of a rule Element** +-----------------+-------------+-------------------------------------------+ | Attribute | Default | Description | +=================+=============+===========================================+ | id | | .. index:: | | | | pair: rule; id | | | | | | | | A unique name for this element (required) | +-----------------+-------------+-------------------------------------------+ | role | ``Started`` | .. index:: | | | | pair: rule; role | | | | | | | | The rule is in effect only when the | | | | resource is in the specified role. | | | | Allowed values are ``Started``, ``Slave``,| | | | and ``Master``. A rule with a ``role`` of | | | | ``Master`` cannot determine the initial | | | | location of a clone instance and will | | | | only affect which of the active instances | | | | will be promoted. | +-----------------+-------------+-------------------------------------------+ | score | | .. index:: | | | | pair: rule; score | | | | | | | | If this rule is used in a location | | | | constraint and evaluates to true, apply | | | | this score to the constraint. Only one of | | | | ``score`` and ``score-attribute`` may be | | | | used. | +-----------------+-------------+-------------------------------------------+ | score-attribute | | ..
index:: | | | | pair: rule; score-attribute | | | | | | | | If this rule is used in a location | | | | constraint and evaluates to true, use the | | | | value of this node attribute as the score | | | | to apply to the constraint. Only one of | | | | ``score`` and ``score-attribute`` may be | | | | used. | +-----------------+-------------+-------------------------------------------+ | boolean-op | ``and`` | .. index:: | | | | pair: rule; boolean-op | | | | | | | | If this rule contains more than one | | | | condition, a value of ``and`` specifies | | | | that the rule evaluates to true only if | | | | all conditions are true, and a value of | | | | ``or`` specifies that the rule evaluates | | | | to true if any condition is true. | +-----------------+-------------+-------------------------------------------+ A ``rule`` element must contain one or more conditions. A condition may be an ``expression`` element, a ``date_expression`` element, or another ``rule`` element. .. index:: single: rule; node attribute expression single: node attribute; rule expression pair: XML element; expression .. _node_attribute_expressions: Node Attribute Expressions ########################## Expressions are rule conditions based on the values of node attributes. .. table:: **Attributes of an expression Element** +--------------+---------------------------------+-------------------------------------------+ | Attribute | Default | Description | +==============+=================================+===========================================+ | id | | .. index:: | | | | pair: expression; id | | | | | | | | A unique name for this element (required) | +--------------+---------------------------------+-------------------------------------------+ | attribute | | .. index:: | | | | pair: expression; attribute | | | | | | | | The node attribute to test (required) | +--------------+---------------------------------+-------------------------------------------+ | type | The default type for | .. index:: | | | ``lt``, ``gt``, ``lte``, and | pair: expression; type | | | ``gte`` operations is ``number``| | | | if either value contains a | How the node attributes should be | | | decimal point character, or | compared. Allowed values are ``string``, | - | | ``integer`` otherwise. The | ``integer``, ``number``, and ``version``. | - | | default type for all other | ``integer`` truncates floating-point | - | | operations is ``string``. If a | values if necessary before performing an | - | | numeric parse fails for either | integer comparison. ``number`` performs a | - | | value, then the values are | floating-point comparison. | - | | compared as type ``string``. | | + | | ``integer`` otherwise. The | ``integer`` *(since 2.0.5)*, ``number``, | + | | default type for all other | and ``version``. ``integer`` truncates | + | | operations is ``string``. If a | floating-point values if necessary before | + | | numeric parse fails for either | performing a 64-bit integer comparison. | + | | value, then the values are | ``number`` performs a double-precision | + | | compared as type ``string``. | floating-point comparison | + | | | *(32-bit integer before 2.0.5)*. | +--------------+---------------------------------+-------------------------------------------+ | operation | | .. index:: | | | | pair: expression; operation | | | | | | | | The comparison to perform (required). 
| | | | Allowed values: | | | | | | | | * ``lt:`` True if the node attribute value| | | | is less than the comparison value | | | | * ``gt:`` True if the node attribute value| | | | is greater than the comparison value | | | | * ``lte:`` True if the node attribute | | | | value is less than or equal to the | | | | comparison value | | | | * ``gte:`` True if the node attribute | | | | value is greater than or equal to the | | | | comparison value | | | | * ``eq:`` True if the node attribute value| | | | is equal to the comparison value | | | | * ``ne:`` True if the node attribute value| | | | is not equal to the comparison value | | | | * ``defined:`` True if the node has the | | | | named attribute | | | | * ``not_defined:`` True if the node does | | | | not have the named attribute | +--------------+---------------------------------+-------------------------------------------+ | value | | .. index:: | | | | pair: expression; value | | | | | | | | User-supplied value for comparison | | | | (required for operations other than | | | | ``defined`` and ``not_defined``) | +--------------+---------------------------------+-------------------------------------------+ | value-source | ``literal`` | .. index:: | | | | pair: expression; value-source | | | | | | | | How the ``value`` is derived. Allowed | | | | values: | | | | | | | | * ``literal``: ``value`` is a literal | | | | string to compare against | | | | * ``param``: ``value`` is the name of a | | | | resource parameter to compare against | | | | (only valid in location constraints) | | | | * ``meta``: ``value`` is the name of a | | | | resource meta-attribute to compare | | | | against (only valid in location | | | | constraints) | +--------------+---------------------------------+-------------------------------------------+ .. _node-attribute-expressions-special: In addition to custom node attributes defined by the administrator, the cluster defines special, built-in node attributes for each node that can also be used in rule expressions. .. table:: **Built-in Node Attributes** +---------------+-----------------------------------------------------------+ | Name | Value | +===============+===========================================================+ | #uname | :ref:`Node name ` | +---------------+-----------------------------------------------------------+ | #id | Node ID | +---------------+-----------------------------------------------------------+ | #kind | Node type. Possible values are ``cluster``, ``remote``, | | | and ``container``. Kind is ``remote`` for Pacemaker Remote| | | nodes created with the ``ocf:pacemaker:remote`` resource, | | | and ``container`` for Pacemaker Remote guest nodes and | | | bundle nodes | +---------------+-----------------------------------------------------------+ | #is_dc | ``true`` if this node is the cluster's Designated | | | Controller (DC), ``false`` otherwise | +---------------+-----------------------------------------------------------+ | #cluster-name | The value of the ``cluster-name`` cluster property, if set| +---------------+-----------------------------------------------------------+ | #site-name | The value of the ``site-name`` node attribute, if set, | | | otherwise identical to ``#cluster-name`` | +---------------+-----------------------------------------------------------+ | #role | The role the relevant promotable clone resource has on | | | this node. Valid only within a rule for a location | | | constraint for a promotable clone resource. 
| +---------------+-----------------------------------------------------------+ .. Add_to_above_table_if_released: +---------------+-----------------------------------------------------------+ | #ra-version | The installed version of the resource agent on the node, | | | as defined by the ``version`` attribute of the | | | ``resource-agent`` tag in the agent's metadata. Valid only| | | within rules controlling resource options. This can be | | | useful during rolling upgrades of a backward-incompatible | - | | resource agent. '(since x.x.x)' | + | | resource agent. *(since x.x.x)* | .. index:: single: rule; date/time expression pair: XML element; date_expression Date/Time Expressions ##################### Date/time expressions are rule conditions based (as the name suggests) on the current date and time. A ``date_expression`` element may optionally contain a ``date_spec`` or ``duration`` element depending on the context. .. table:: **Attributes of a date_expression Element** +---------------+-----------------------------------------------------------+ | Attribute | Description | +===============+===========================================================+ | id | .. index:: | | | pair: id; date_expression | | | | | | A unique name for this element (required) | +---------------+-----------------------------------------------------------+ | start | .. index:: | | | pair: start; date_expression | | | | | | A date/time conforming to the | | | `ISO8601 `_ | | | specification. May be used when ``operation`` is | | | ``in_range`` (in which case at least one of ``start`` or | | | ``end`` must be specified) or ``gt`` (in which case | | | ``start`` is required). | +---------------+-----------------------------------------------------------+ | end | .. index:: | | | pair: end; date_expression | | | | | | A date/time conforming to the | | | `ISO8601 `_ | | | specification. May be used when ``operation`` is | | | ``in_range`` (in which case at least one of ``start`` or | | | ``end`` must be specified) or ``lt`` (in which case | | | ``end`` is required). | +---------------+-----------------------------------------------------------+ | operation | .. index:: | | | pair: operation; date_expression | | | | | | Compares the current date/time with the start and/or end | | | date, depending on the context. Allowed values: | | | | | | * ``gt:`` True if the current date/time is after ``start``| | | * ``lt:`` True if the current date/time is before ``end`` | | | * ``in_range:`` True if the current date/time is after | | | ``start`` (if specified) and before either ``end`` (if | | | specified) or ``start`` plus the value of the | | | ``duration`` element (if one is contained in the | | | ``date_expression``) | | | * ``date_spec:`` True if the current date/time matches | | | the specification given in the contained ``date_spec`` | | | element (described below) | +---------------+-----------------------------------------------------------+ .. note:: There is no ``eq``, ``neq``, ``gte``, or ``lte`` operation, since they would be valid only for a single second. .. index:: single: date specification pair: XML element; date_spec Date Specifications ___________________ A ``date_spec`` element is used to create a cron-like expression relating to time. Each field can contain a single number or range. Any field not supplied is ignored. .. 
table:: **Attributes of a date_spec Element** +---------------+-----------------------------------------------------------+ | Attribute | Description | +===============+===========================================================+ | id | .. index:: | | | pair: id; date_spec | | | | | | A unique name for this element (required) | +---------------+-----------------------------------------------------------+ | hours | .. index:: | | | pair: hours; date_spec | | | | | | Allowed values: 0-23 (where 0 is midnight and 23 is | | | 11 p.m.) | +---------------+-----------------------------------------------------------+ | monthdays | .. index:: | | | pair: monthdays; date_spec | | | | | | Allowed values: 1-31 (depending on month and year) | +---------------+-----------------------------------------------------------+ | weekdays | .. index:: | | | pair: weekdays; date_spec | | | | | | Allowed values: 1-7 (where 1 is Monday and 7 is Sunday) | +---------------+-----------------------------------------------------------+ | yeardays | .. index:: | | | pair: yeardays; date_spec | | | | | | Allowed values: 1-366 (depending on the year) | +---------------+-----------------------------------------------------------+ | months | .. index:: | | | pair: months; date_spec | | | | | | Allowed values: 1-12 | +---------------+-----------------------------------------------------------+ | weeks | .. index:: | | | pair: weeks; date_spec | | | | | | Allowed values: 1-53 (depending on weekyear) | +---------------+-----------------------------------------------------------+ | years | .. index:: | | | pair: years; date_spec | | | | | | Year according to the Gregorian calendar | +---------------+-----------------------------------------------------------+ | weekyears | .. index:: | | | pair: weekyears; date_spec | | | | | | Year in which the week started; for example, 1 January | | | 2005 can be specified in ISO 8601 as "2005-001 Ordinal", | | | "2005-01-01 Gregorian" or "2004-W53-6 Weekly" and thus | | | would match ``years="2005"`` or ``weekyears="2004"`` | +---------------+-----------------------------------------------------------+ | moon | .. index:: | | | pair: moon; date_spec | | | | | | Allowed values are 0-7 (where 0 is the new moon and 4 is | | | full moon). Seriously, you can use this. This was | | | implemented to demonstrate the ease with which new | | | comparisons could be added. | +---------------+-----------------------------------------------------------+ For example, ``monthdays="1"`` matches the first day of every month, and ``hours="09-17"`` matches the hours between 9 a.m. and 5 p.m. (inclusive). At this time, multiple ranges (e.g. ``weekdays="1,2"`` or ``weekdays="1-2,5-6"``) are not supported. .. note:: Pacemaker can calculate when evaluation of a ``date_expression`` with an ``operation`` of ``gt``, ``lt``, or ``in_range`` will next change, and schedule a cluster re-check for that time. However, it does not do this for ``date_spec``. Instead, it evaluates the ``date_spec`` whenever a cluster re-check naturally happens via a cluster event or the ``cluster-recheck-interval`` cluster option. For example, if you have a ``date_spec`` enabling a resource from 9 a.m. to 5 p.m., and ``cluster-recheck-interval`` has been set to 5 minutes, then sometime between 9 a.m. and 9:05 a.m. the cluster would notice that it needs to start the resource, and sometime between 5 p.m. and 5:05 p.m. it would realize that it needs to stop the resource. 
The timing of the actual start and stop actions will further depend on factors such as any other actions the cluster may need to perform first, and the load of the machine. .. index:: single: duration pair: XML element; duration Durations _________ A ``duration`` is used to calculate a value for ``end`` when one is not supplied to ``in_range`` operations. It contains one or more attributes, each containing a single number. Any attribute not supplied is ignored. .. table:: **Attributes of a duration Element** +---------------+-----------------------------------------------------------+ | Attribute | Description | +===============+===========================================================+ | id | .. index:: | | | pair: id; duration | | | | | | A unique name for this element (required) | +---------------+-----------------------------------------------------------+ | seconds | .. index:: | | | pair: seconds; duration | | | | | | This many seconds will be added to the total duration | +---------------+-----------------------------------------------------------+ | minutes | .. index:: | | | pair: minutes; duration | | | | | | This many minutes will be added to the total duration | +---------------+-----------------------------------------------------------+ | hours | .. index:: | | | pair: hours; duration | | | | | | This many hours will be added to the total duration | +---------------+-----------------------------------------------------------+ | weeks | .. index:: | | | pair: weeks; duration | | | | | | This many weeks will be added to the total duration | +---------------+-----------------------------------------------------------+ | months | .. index:: | | | pair: months; duration | | | | | | This many months will be added to the total duration | +---------------+-----------------------------------------------------------+ | years | .. index:: | | | pair: years; duration | | | | | | This many years will be added to the total duration | +---------------+-----------------------------------------------------------+ Example Time-Based Expressions ______________________________ A small sample of how time-based expressions can be used: .. topic:: True if now is any time in the year 2005 .. code-block:: xml or equivalently: .. code-block:: xml .. topic:: 9 a.m. to 5 p.m. Monday through Friday .. code-block:: xml Note that the ``16`` matches all the way through ``16:59:59``, because the numeric value of the hour still matches. .. topic:: 9 a.m. to 6 p.m. Monday through Friday or anytime Saturday .. code-block:: xml .. topic:: 9 a.m. to 5 p.m. or 9 p.m. to 12 a.m. Monday through Friday .. code-block:: xml .. topic:: Mondays in March 2005 .. code-block:: xml .. note:: Because no time is specified with the above dates, 00:00:00 is implied. This means that the range includes all of 2005-03-01 but none of 2005-04-01. You may wish to write ``end`` as ``"2005-03-31T23:59:59"`` to avoid confusion. .. topic:: A full moon on Friday the 13th .. code-block:: xml .. index:: single: rule; resource expression single: resource; rule expression pair: XML element; rsc_expression Resource Expressions #################### -An ``rsc_expression`` is a rule condition based on a resource agent's -properties. This rule is only valid within an ``rsc_defaults`` or +An ``rsc_expression`` *(since 2.0.5)* is a rule condition based on a resource +agent's properties. This rule is only valid within an ``rsc_defaults`` or ``op_defaults`` context. None of the matching attributes of ``class``, ``provider``, and ``type`` are required. If one is omitted, all values of that attribute will match. For instance, omitting ``type`` means every type will match.
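As a sketch, an ``rsc_expression`` that matches every ``ocf:heartbeat:IPaddr2`` resource might look like this (the id is illustrative):

.. code-block:: xml

   <rsc_expression id="one-ipaddr2-rsc" class="ocf" provider="heartbeat" type="IPaddr2"/>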
.. table:: **Attributes of a rsc_expression Element** +---------------+-----------------------------------------------------------+ | Attribute | Description | +===============+===========================================================+ | id | .. index:: | | | pair: id; rsc_expression | | | | | | A unique name for this element (required) | +---------------+-----------------------------------------------------------+ | class | .. index:: | | | pair: class; rsc_expression | | | | | | The standard name to be matched against resource agents | +---------------+-----------------------------------------------------------+ | provider | .. index:: | | | pair: provider; rsc_expression | | | | | | If given, the vendor to be matched against resource | | | agents (only relevant when ``class`` is ``ocf``) | +---------------+-----------------------------------------------------------+ | type | .. index:: | | | pair: type; rsc_expression | | | | | | The name of the resource agent to be matched | +---------------+-----------------------------------------------------------+ Example Resource-Based Expressions __________________________________ A small sample of how resource-based expressions can be used: .. topic:: True for all ``ocf:heartbeat:IPaddr2`` resources .. code-block:: xml .. topic:: Provider doesn't apply to non-OCF resources .. code-block:: xml .. index:: single: rule; operation expression single: operation; rule expression pair: XML element; op_expression Operation Expressions ##################### -An ``op_expression`` is a rule condition based on an action of some resource -agent. This rule is only valid within an ``op_defaults`` context. +An ``op_expression`` *(since 2.0.5)* is a rule condition based on an action of +some resource agent. This rule is only valid within an ``op_defaults`` context. .. table:: **Attributes of an op_expression Element** +---------------+-----------------------------------------------------------+ | Attribute | Description | +===============+===========================================================+ | id | .. index:: | | | pair: id; op_expression | | | | | | A unique name for this element (required) | +---------------+-----------------------------------------------------------+ | name | .. index:: | | | pair: name; op_expression | | | | | | The action name to match against. This can be any action | | | supported by the resource agent; common values include | | | ``monitor``, ``start``, and ``stop`` (required). | +---------------+-----------------------------------------------------------+ | interval | .. index:: | | | pair: interval; op_expression | | | | | | The interval of the action to match against. If not given,| | | only the name attribute will be used to match. | +---------------+-----------------------------------------------------------+ Example Operation-Based Expressions ___________________________________ A small sample of how operation-based expressions can be used: .. topic:: True for all monitor actions .. code-block:: xml .. topic:: True for all monitor actions with a 10 second interval .. code-block:: xml .. index:: pair: location constraint; rule Using Rules to Determine Resource Location ########################################## A location constraint may contain one or more top-level rules. The cluster will act as if there is a separate location constraint for each rule that evaluates as true. Consider the following simple location constraint: .. topic:: Prevent resource ``webserver`` from running on node ``node3`` .. code-block:: xml The same constraint can be more verbosely written using a rule: .. topic:: Prevent resource ``webserver`` from running on node ``node3`` using a rule .. code-block:: xml The advantage of using the expanded form is that one could add more expressions (for example, limiting the constraint to certain days of the week), or activate the constraint by some node attribute other than node name.
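A sketch of the rule-based form of that constraint (ids are illustrative):

.. code-block:: xml

   <rsc_location id="ban-webserver-on-node3" rsc="webserver">
     <rule id="ban-webserver-rule" score="-INFINITY">
       <expression id="ban-webserver-expr" attribute="#uname" operation="eq" value="node3"/>
     </rule>
   </rsc_location>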
Location Rules Based on Other Node Properties _____________________________________________ The expanded form allows us to match on node properties other than the node's name. If we rated each machine's CPU power such that the cluster had the following nodes section: .. topic:: Sample node section with node attributes .. code-block:: xml then we could prevent resources from running on underpowered machines with this rule: .. topic:: Rule using a node attribute (to be used inside a location constraint) .. code-block:: xml Using ``score-attribute`` Instead of ``score`` ______________________________________________ When using ``score-attribute`` instead of ``score``, each node matched by the rule has its score adjusted differently, according to its value for the named node attribute. Thus, in the previous example, if a rule inside a location constraint for a resource used ``score-attribute="cpu_mips"``, ``c001n01`` would have its preference to run the resource increased by ``1234`` whereas ``c001n02`` would have its preference increased by ``5678``. .. index:: pair: cluster option; rule pair: instance attribute; rule pair: meta-attribute; rule pair: resource defaults; rule pair: operation defaults; rule pair: node attribute; rule Using Rules to Define Options ############################# Rules may be used to control a variety of options: * :ref:`Cluster options ` (``cluster_property_set`` elements) * :ref:`Node attributes ` (``instance_attributes`` or ``utilization`` elements inside a ``node`` element) * :ref:`Resource options ` (``utilization``, ``meta_attributes``, or ``instance_attributes`` elements inside a resource definition element or ``op``, ``rsc_defaults``, ``op_defaults``, or ``template`` element) * :ref:`Operation properties ` (``meta_attributes`` elements inside an ``op`` or ``op_defaults`` element) Using Rules to Control Resource Options _______________________________________ Often some cluster nodes will be different from their peers. Sometimes, these differences -- e.g. the location of a binary or the names of network interfaces -- require resources to be configured differently depending on the machine they're hosted on. By defining multiple ``instance_attributes`` objects for the resource and adding a rule to each, we can easily handle these special cases. In the example below, ``mySpecialRsc`` will use eth1 and port 9999 when run on ``node1``, eth2 and port 8888 on ``node2``, and default to eth0 and port 9999 for all other nodes. .. topic:: Defining different resource options based on the node name .. code-block:: xml The order in which ``instance_attributes`` objects are evaluated is determined by their score (highest to lowest). If not supplied, the score defaults to zero. Objects with an equal score are processed in their listed order. If the ``instance_attributes`` object has no rule, or a ``rule`` that evaluates to ``true``, then for any parameter the resource does not yet have a value for, the resource will use the parameter values defined by the ``instance_attributes``. For example, given the configuration above, if the resource is placed on ``node1``: * ``special-node1`` has the highest score (3) and so is evaluated first; its rule evaluates to ``true``, so ``interface`` is set to ``eth1``. * ``special-node2`` is evaluated next with score 2, but its rule evaluates to ``false``, so it is ignored. * ``defaults`` is evaluated last with score 1, and has no rule, so its values are examined; ``interface`` is already defined, so the value here is not used, but ``port`` is not yet defined, so ``port`` is set to ``9999``.
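Concretely, that configuration could look roughly like this sketch (the ``Special`` agent, its provider, and all ids are illustrative assumptions):

.. code-block:: xml

   <primitive id="mySpecialRsc" class="ocf" provider="me" type="Special">
     <instance_attributes id="special-node1" score="3">
       <rule id="node1-special-case" score="INFINITY">
         <expression id="node1-special-case-expr" attribute="#uname" operation="eq" value="node1"/>
       </rule>
       <nvpair id="node1-interface" name="interface" value="eth1"/>
     </instance_attributes>
     <instance_attributes id="special-node2" score="2">
       <rule id="node2-special-case" score="INFINITY">
         <expression id="node2-special-case-expr" attribute="#uname" operation="eq" value="node2"/>
       </rule>
       <nvpair id="node2-interface" name="interface" value="eth2"/>
       <nvpair id="node2-port" name="port" value="8888"/>
     </instance_attributes>
     <instance_attributes id="defaults" score="1">
       <nvpair id="default-interface" name="interface" value="eth0"/>
       <nvpair id="default-port" name="port" value="9999"/>
     </instance_attributes>
   </primitive>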
Using Rules to Control Resource Defaults ________________________________________ Rules can be used for resource and operation defaults. The following example illustrates how to set a different ``resource-stickiness`` value during and outside work hours. This allows resources to automatically move back to their most preferred hosts, but at a time that (in theory) does not interfere with business activities. .. topic:: Change ``resource-stickiness`` during working hours .. code-block:: xml Rules may be used similarly in ``instance_attributes`` or ``utilization`` blocks. Any single block may directly contain only a single rule, but that rule may itself contain any number of rules. ``rsc_expression`` and ``op_expression`` blocks may additionally be used to set defaults on either a single resource or across an entire class of resources with a single rule. ``rsc_expression`` may be used to select resource agents within both ``rsc_defaults`` and ``op_defaults``, while ``op_expression`` may only be used within ``op_defaults``. If multiple rules succeed for a given resource agent, the last one specified will be the one that takes effect. As with any other rule, boolean operations may be used to make more complicated expressions. .. topic:: Default all IPaddr2 resources to stopped .. code-block:: xml .. topic:: Default all monitor action timeouts to 7 seconds .. code-block:: xml .. topic:: Default the timeout on all 10-second-interval monitor actions on ``IPaddr2`` resources to 8 seconds .. code-block:: xml .. index:: pair: rule; cluster option Using Rules to Control Cluster Options ______________________________________ Controlling cluster options is achieved in much the same manner as specifying different resource options on different nodes. The difference is that because they are cluster options, one cannot (or should not, because they won't work) use attribute-based expressions. The following example illustrates how to set ``maintenance-mode`` during a scheduled maintenance window. This will keep the cluster running but not monitor, start, or stop resources during this time. .. topic:: Schedule a maintenance window for 9 to 11 p.m. CDT Sept. 20, 2019 .. code-block:: xml .. important:: The ``cluster_property_set`` with an ``id`` set to "cib-bootstrap-options" will *always* have the highest priority, regardless of any scores. Therefore, rules in another ``cluster_property_set`` can never take effect for any properties listed in the bootstrap set.
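A sketch of that maintenance window (ids are illustrative, and the offset assumes CDT is UTC-5; note the property set deliberately uses an ``id`` other than ``cib-bootstrap-options``, so its rule can take effect provided ``maintenance-mode`` is not also listed in the bootstrap set):

.. code-block:: xml

   <cluster_property_set id="maintenance-window" score="1000">
     <rule id="maintenance-window-rule" score="INFINITY">
       <date_expression id="maintenance-window-dates" operation="in_range"
           start="2019-09-20 21:00:00 -05:00" end="2019-09-20 23:00:00 -05:00"/>
     </rule>
     <nvpair id="maintenance-window-nvpair" name="maintenance-mode" value="true"/>
   </cluster_property_set>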