diff --git a/COPYING b/COPYING
index a8335ca35a..3f7fb37b2e 100644
--- a/COPYING
+++ b/COPYING
@@ -1,14 +1,14 @@
Except where noted otherwise in the file itself, the source code for all
Pacemaker programs is licensed under version 2 or later of the GNU General
Public License (GPLv2+), its headers and libraries under version 2.1 or later
of the less restrictive GNU Lesser General Public License (LGPLv2.1+), its
documentation under version 4.0 or later of the Creative Commons
Attribution-ShareAlike International Public License (CC-BY-SA v4.0+), and its
init scripts under the Revised BSD license. The text of these licenses are
provided in the "licenses" subdirectory.

If you find any deviations from this policy, or wish to inquire about alternate
-licensing arrangements, please e-mail pacemaker@oss.clusterlabs.org.
+licensing arrangements, please e-mail andrew@beekhof.net.
Licensing issues are further discussed on the ClusterLabs wiki
(at http://clusterlabs.org/wiki/License).
diff --git a/configure.ac b/configure.ac
index 93bb32ca4a..64628cf99b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,2163 +1,2163 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl License: GNU General Public License (GPL)

dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.59)

dnl Suggested structure:
dnl     information on the package
dnl     checks for programs
dnl     checks for libraries
dnl     checks for header files
dnl     checks for types
dnl     checks for structures
dnl     checks for compiler characteristics
dnl     checks for library functions
dnl     checks for system services

m4_include([version.m4])
-AC_INIT([pacemaker], VERSION_NUMBER, [pacemaker@oss.clusterlabs.org],
+AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org],
         [pacemaker], PCMK_URL)

dnl Workaround autoconf < 2.64
if test x"${PACKAGE_URL}" = x""; then
    AC_SUBST([PACKAGE_URL], PCMK_URL)
fi

PCMK_FEATURES=""
HB_PKG=heartbeat

AC_CONFIG_AUX_DIR(.)
AC_CANONICAL_HOST

dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
dnl
dnl Internal header: include/config.h
dnl   - Contains ALL defines
dnl   - include/config.h.in is generated automatically by autoheader
dnl   - NOT to be included in any header files except lha_internal.h
dnl     (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl   - Contains a subset of defines checked here
dnl   - Manually edit include/crm_config.h.in to have configure include
dnl     new defines
dnl   - Should not include HAVE_* defines
dnl   - Safe to include anywhere
AM_CONFIG_HEADER(include/config.h include/crm_config.h)
ALL_LINGUAS="en fr"

AC_ARG_WITH(version,
    [  --with-version=version  Override package version (if you're a packager needing to pretend) ],
    [ PACKAGE_VERSION="$withval" ])

AC_ARG_WITH(pkg-name,
    [  --with-pkg-name=name    Override package name (if you're a packager needing to pretend) ],
    [ PACKAGE_NAME="$withval" ])

dnl Older distros may need: AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION)
AM_INIT_AUTOMAKE([foreign])

AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version)

PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'`
AC_SUBST(PACKAGE_SERIES)
AC_SUBST(PACKAGE_VERSION)

dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from
dnl normal compilation.  When a failure occurs, it will then display the full
dnl command line
dnl Wrap in m4_ifdef to avoid breaking on older platforms
m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])])
dnl Example 2.4. Silent Custom Rule to Generate a File
dnl %-bar.pc: %.pc
dnl	$(AM_V_GEN)$(LN_S) $(notdir $^) $@

CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE

LDD=ldd
BUILD_ATOMIC_ATTRD=1

dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================
AC_PROG_CC dnl Can force other with environment variable "CC".
AM_PROG_CC_C_O
AC_PROG_CC_STDC
gl_EARLY
gl_INIT

AC_LIBTOOL_DLOPEN dnl Enable dlopen support...
AC_LIBLTDL_CONVENIENCE dnl make libltdl a convenience lib
AC_PROG_LIBTOOL

AC_PROG_YACC
AM_PROG_LEX
AC_C_STRINGIZE
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(char)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
AC_STRUCT_TIMEZONE

dnl ===============================================
dnl Helpers
dnl ===============================================
cc_supports_flag() {
    local CFLAGS="-Werror $@"
    AC_MSG_CHECKING(whether $CC supports "$@")
    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
                      [RC=0; AC_MSG_RESULT(yes)],
                      [RC=1; AC_MSG_RESULT(no)])
    return $RC
}

try_extract_header_define() {
    AC_MSG_CHECKING(if $2 in $1 exists)
    Cfile=$srcdir/extract_define.$2.${$}
    printf "#include <stdio.h>\n" > ${Cfile}.c
    printf "#include <%s>\n" $1 >> ${Cfile}.c
    printf "int main(int argc, char **argv) {\n" >> ${Cfile}.c
    printf "#ifdef %s\n" $2 >> ${Cfile}.c
    printf "printf(\"%%s\", %s);\n" $2 >> ${Cfile}.c
    printf "#endif \n return 0; }\n" >> ${Cfile}.c
    $CC $CFLAGS ${Cfile}.c -o ${Cfile} 2>/dev/null
    value=
    if test -x ${Cfile}; then
        value=`${Cfile} 2>/dev/null`
    fi
    if test x"${value}" == x""; then
        value=$3
        AC_MSG_RESULT(default: $value)
    else
        AC_MSG_RESULT($value)
    fi
    printf $value
    rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno
}

extract_header_define() {
    AC_MSG_CHECKING(for $2 in $1)
    Cfile=$srcdir/extract_define.$2.${$}
    printf "#include <stdio.h>\n" > ${Cfile}.c
    printf "#include <%s>\n" $1 >> ${Cfile}.c
    printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c
    $CC $CFLAGS ${Cfile}.c -o ${Cfile}
    value=`${Cfile}`
    AC_MSG_RESULT($value)
    printf $value
    rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno
}

dnl ===============================================
dnl Configure Options
dnl ===============================================

dnl Some systems, like Solaris require a custom package name
AC_ARG_WITH(pkgname,
    [  --with-pkgname=name     name for pkg (typically for Solaris) ],
    [ PKGNAME="$withval" ],
    [ PKGNAME="LXHAhb" ],
)
AC_SUBST(PKGNAME)

AC_ARG_ENABLE([ansi],
    [  --enable-ansi           force GCC to compile to ANSI/ISO standard for older compilers. [default=no]])
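dnl Illustrative only, not part of the build: cc_supports_flag (defined above)
dnl is meant to gate optional compiler flags; -Wextra here is a hypothetical
dnl example, the real probes appear later in this file:
dnl
dnl     if cc_supports_flag -Wextra; then
dnl         CC_EXTRAS="$CC_EXTRAS -Wextra"
dnl     fi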
AC_ARG_ENABLE([fatal-warnings],
    [  --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]])

AC_ARG_ENABLE([quiet],
    [  --enable-quiet          Suppress make output unless there is an error [default=no]])

AC_ARG_ENABLE([bundled-ltdl],
    [  --enable-bundled-ltdl   Configure, build and install the standalone ltdl library bundled with ${PACKAGE} [default=no]])
LTDL_LIBS=""

AC_ARG_ENABLE([no-stack],
    [  --enable-no-stack       Only build the Policy Engine and pieces needed to support it [default=no]])

AC_ARG_ENABLE([upstart],
    [  --enable-upstart        Enable support for managing resources via Upstart [default=try]],
    [],
    [enable_upstart=try],
)

AC_ARG_ENABLE([systemd],
    [  --enable-systemd        Enable support for managing resources via systemd [default=try]],
    [],
    [enable_systemd=try],
)

AC_ARG_ENABLE(hardening,
    [  --enable-hardening      Harden the resulting executables/libraries (best effort by default)],
    [ HARDENING="${enableval}" ],
    [ HARDENING=try ],
)

AC_ARG_WITH(ais,
    [  --with-ais              Support the Corosync messaging and membership layer ],
    [ SUPPORT_CS=$withval ],
    [ SUPPORT_CS=try ],
)

AC_ARG_WITH(corosync,
    [  --with-corosync         Support the Corosync messaging and membership layer ],
    [ SUPPORT_CS=$withval ]
    dnl initialized in AC_ARG_WITH(ais...) already,
    dnl don't reset to try if it was given as --without-ais
)

AC_ARG_WITH(heartbeat,
    [  --with-heartbeat        Support the Heartbeat messaging and membership layer ],
    [ SUPPORT_HEARTBEAT=$withval ],
    [ SUPPORT_HEARTBEAT=try ],
)

AC_ARG_WITH(cman,
    [  --with-cman             Support the consumption of membership and quorum from cman ],
    [ SUPPORT_CMAN=$withval ],
    [ SUPPORT_CMAN=try ],
)

AC_ARG_WITH(cpg,
    [  --with-cs-quorum        Support the consumption of membership and quorum from corosync ],
    [ SUPPORT_CS_QUORUM=$withval ],
    [ SUPPORT_CS_QUORUM=try ],
)

AC_ARG_WITH(nagios,
    [  --with-nagios           Support nagios remote monitoring ],
    [ SUPPORT_NAGIOS=$withval ],
    [ SUPPORT_NAGIOS=try ],
)

AC_ARG_WITH(nagios-plugin-dir,
    [  --with-nagios-plugin-dir=DIR    Directory for nagios plugins [${NAGIOS_PLUGIN_DIR}]],
    [ NAGIOS_PLUGIN_DIR="$withval" ]
)

AC_ARG_WITH(nagios-metadata-dir,
    [  --with-nagios-metadata-dir=DIR  Directory for nagios plugins metadata [${NAGIOS_METADATA_DIR}]],
    [ NAGIOS_METADATA_DIR="$withval" ]
)

AC_ARG_WITH(snmp,
    [  --with-snmp             Support the SNMP protocol ],
    [ SUPPORT_SNMP=$withval ],
    [ SUPPORT_SNMP=try ],
)

AC_ARG_WITH(esmtp,
    [  --with-esmtp            Support sending mail notifications with the esmtp library ],
    [ SUPPORT_ESMTP=$withval ],
    [ SUPPORT_ESMTP=try ],
)

AC_ARG_WITH(acl,
    [  --with-acl              Support CIB ACL ],
    [ SUPPORT_ACL=$withval ],
    [ SUPPORT_ACL=yes ],
)

AC_ARG_WITH(cibsecrets,
    [  --with-cibsecrets       Support CIB secrets ],
    [ SUPPORT_CIBSECRETS=$withval ],
    [ SUPPORT_CIBSECRETS=no ],
)

CSPREFIX=""
AC_ARG_WITH(ais-prefix,
    [  --with-ais-prefix=DIR   Prefix used when Corosync was installed [$prefix]],
    [ CSPREFIX=$withval ],
    [ CSPREFIX=$prefix ])

INITDIR=""
AC_ARG_WITH(initdir,
    [  --with-initdir=DIR      directory for init (rc) scripts [${INITDIR}]],
    [ INITDIR="$withval" ])

SUPPORT_PROFILING=0
AC_ARG_WITH(profiling,
    [  --with-profiling        Disable optimizations for effective profiling ],
    [ SUPPORT_PROFILING=$withval ])

AC_ARG_WITH(coverage,
    [  --with-coverage         Disable optimizations for effective coverage testing ],
    [ SUPPORT_COVERAGE=$withval ])

PUBLICAN_BRAND="common"
AC_ARG_WITH(brand,
    [  --with-brand=brand      Brand to use for generated documentation (set empty for no docs) [$PUBLICAN_BRAND]],
    [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ])
AC_SUBST(PUBLICAN_BRAND)

ASCIIDOC_CLI_TYPE="pcs"
AC_ARG_WITH(doc-cli,
    [  --with-doc-cli=cli_type CLI type to use for generated documentation. [$ASCIIDOC_CLI_TYPE]],
    [ ASCIIDOC_CLI_TYPE="$withval" ])
AC_SUBST(ASCIIDOC_CLI_TYPE)

CONFIGDIR=""
AC_ARG_WITH(configdir,
    [  --with-configdir=DIR    Directory for Pacemaker configuration file [${CONFIGDIR}]],
    [ CONFIGDIR="$withval" ]
)

dnl ===============================================
dnl General Processing
dnl ===============================================

AC_SUBST(HB_PKG)

INIT_EXT=""
echo Our Host OS: $host_os/$host

AC_MSG_NOTICE(Sanitizing prefix: ${prefix})
case $prefix in
  NONE)
    prefix=/usr
    dnl Fix default variables - "prefix" variable if not specified
    if test "$localstatedir" = "\${prefix}/var"; then
        localstatedir="/var"
    fi
    if test "$sysconfdir" = "\${prefix}/etc"; then
        sysconfdir="/etc"
    fi
    ;;
esac

AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix})
case $exec_prefix in
  dnl For consistency with Heartbeat, map NONE->$prefix
  NONE)   exec_prefix=$prefix;;
  prefix) exec_prefix=$prefix;;
esac

AC_MSG_NOTICE(Sanitizing ais_prefix: ${CSPREFIX})
case $CSPREFIX in
  dnl For consistency with Heartbeat, map NONE->$prefix
  NONE)   CSPREFIX=$prefix;;
  prefix) CSPREFIX=$prefix;;
esac

AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR})
case $INITDIR in
  prefix) INITDIR=$prefix;;
  "")
    AC_MSG_CHECKING(which init (rc) directory to use)
    for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
        /usr/local/etc/rc.d /etc/rc.d
    do
        if test -d $initdir
        then
            INITDIR=$initdir
            break
        fi
    done
    AC_MSG_RESULT($INITDIR);;
esac
AC_SUBST(INITDIR)

AC_MSG_NOTICE(Sanitizing libdir: ${libdir})
case $libdir in
  dnl For consistency with Heartbeat, map NONE->$prefix
  prefix|NONE)
    AC_MSG_CHECKING(which lib directory to use)
    for aDir in lib64 lib
    do
        trydir="${exec_prefix}/${aDir}"
        if test -d ${trydir}
        then
            libdir=${trydir}
            break
        fi
    done
    AC_MSG_RESULT($libdir);
    ;;
esac

dnl Expand autoconf variables so that we don't end up with '${prefix}'
dnl in #defines and python scripts
dnl NOTE: Autoconf deliberately leaves them unexpanded to allow
dnl    make exec_prefix=/foo install
dnl No longer being able to do this seems like no great loss to me...

eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"

dnl Home-grown variables
eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
    docdir=${datadir}/doc/${PACKAGE}-${VERSION}
    #docdir=${datadir}/doc/packages/${PACKAGE}
fi
AC_SUBST(docdir)

if test x"${CONFIGDIR}" = x""; then
    CONFIGDIR="${sysconfdir}/sysconfig"
fi
AC_SUBST(CONFIGDIR)

for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
    sharedstatedir localstatedir libdir includedir oldincludedir infodir \
    mandir INITDIR docdir CONFIGDIR
do
    dirname=`eval echo '${'${j}'}'`
    if test ! -d "$dirname"
    then
        AC_MSG_WARN([$j directory ($dirname) does not exist!])
    fi
done
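dnl Illustrative only (hypothetical values): the eval dance above turns a
dnl nested autoconf default into a concrete path, e.g.
dnl
dnl     prefix=/usr
dnl     exec_prefix=$prefix
dnl     libexecdir='${exec_prefix}/libexec'
dnl     eval libexecdir="`eval echo ${libexecdir}`"   # libexecdir -> /usr/libexec
dnl
dnl so the #defines and python scripts mentioned above see real paths.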
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac dnl Eventually remove this if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) ac_save_CFLAGS=$CFLAGS CFLAGS="-Wall -Werror" AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [ #include #include #include ], [ int max = 512; uint64_t bignum = 42; char *buffer = malloc(max); const char *random = "random"; snprintf(buffer, max-1, "", bignum, random); fprintf(stderr, "Result: %s\n", buffer); ] )], [U64T="%lu"], [U64T="%llu"] ) CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) AM_PATH_PYTHON AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(HTML2TXT, lynx w3m) AC_PATH_PROGS(HELP2MAN, help2man) AC_PATH_PROGS(POD2MAN, pod2man, pod2man) AC_PATH_PROGS(ASCIIDOC, asciidoc) AC_PATH_PROGS(PUBLICAN, publican) AC_PATH_PROGS(INKSCAPE, inkscape) AC_PATH_PROGS(XSLTPROC, xsltproc) AC_PATH_PROGS(XMLCATALOG, xmlcatalog) AC_PATH_PROGS(FOP, fop) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PKGCONFIG, pkg-config) AC_PATH_PROGS(XML2CONFIG, xml2-config) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) dnl Disable these until we decide if the stonith config file should be supported dnl AC_PATH_PROGS(BISON, bison) dnl AC_PATH_PROGS(FLEX, flex) dnl AC_PATH_PROGS(HAVE_YACC, $YACC) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) 
        for d in ${DIRS}; do
            if test -f "${d}/${XSLT}"; then
                MANPAGE_XSLT="${d}/${XSLT}"
                break
            fi
        done
    fi
fi
AC_MSG_RESULT($MANPAGE_XSLT)
AC_SUBST(MANPAGE_XSLT)

AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"")
if test x"${MANPAGE_XSLT}" != x""; then
    PCMK_FEATURES="$PCMK_FEATURES agent-manpages"
fi

AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
if test x"${ASCIIDOC}" != x""; then
    PCMK_FEATURES="$PCMK_FEATURES ascii-docs"
fi

SUPPORT_STONITH_CONFIG=0
if test x"${HAVE_YACC}" != x"" -a x"${FLEX}" != x"" -a x"${BISON}" != x""; then
    SUPPORT_STONITH_CONFIG=1
    PCMK_FEATURES="$PCMK_FEATURES st-conf"
fi
AM_CONDITIONAL(BUILD_STONITH_CONFIG, test $SUPPORT_STONITH_CONFIG = 1)
AC_DEFINE_UNQUOTED(SUPPORT_STONITH_CONFIG, $SUPPORT_STONITH_CONFIG,
                   Support a stand-alone stonith config file in addition to the CIB)

publican_intree_brand=no
if test x"${PUBLICAN_BRAND}" != x"" \
   && test x"${PUBLICAN}" != x"" \
   && test x"${INKSCAPE}" != x""; then
    dnl special handling for clusterlabs brand (possibly in-tree version used)
    test "${PUBLICAN_BRAND}" != "clusterlabs" \
      || test -d /usr/share/publican/Common_Content/clusterlabs
    if test $? -ne 0; then
        dnl Unknown option: brand_dir vs. Option brand_dir requires an argument
        if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then
            AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common])
            PUBLICAN_BRAND=common
        else
            publican_intree_brand=yes
        fi
    fi
    AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand])
    PCMK_FEATURES="$PCMK_FEATURES publican-docs"
fi
AM_CONDITIONAL([BUILD_DOCBOOK],
               [test x"${PUBLICAN_BRAND}" != x"" \
                  && test x"${PUBLICAN}" != x"" \
                  && test x"${INKSCAPE}" != x""])
AM_CONDITIONAL([PUBLICAN_INTREE_BRAND],
               [test x"${publican_intree_brand}" = x"yes"])

dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl        is a dummy function which no system supplies.  It is here to make
dnl        the system compile semi-correctly on OpenBSD which doesn't know
dnl        how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl        System-V systems may have it, but hidden and/or deprecated.
dnl        A replacement function is supplied for it.
dnl
dnl setenv: is some bsdish function that should also be avoided (use
dnl        putenv instead)
dnl        On the other hand, putenv doesn't provide the right API for the
dnl        code and has memory leaks designed in (sigh...), so a replacement
dnl        function is supplied for it as well.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl        A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl        We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl        We wrote a tolerably-fast replacement function for it.

AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup)
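dnl Illustrative only: each function named in AC_REPLACE_FUNCS is built from a
dnl replace/<name>.c fallback when the platform lacks it.  A minimal sketch of
dnl such a fallback (hypothetical, not the actual replace/ source):
dnl
dnl     #include <string.h>
dnl     size_t strnlen(const char *s, size_t maxlen)
dnl     {
dnl         const char *p = memchr(s, '\0', maxlen);
dnl         return p ? (size_t)(p - s) : maxlen;
dnl     }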
dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket)            dnl -lsocket
AC_CHECK_LIB(c, dlopen)                 dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen)                dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler)    dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long)    dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start)            dnl -lpam (if available)

AC_CHECK_FUNCS([sched_setscheduler])

AC_CHECK_LIB(uuid, uuid_parse)          dnl load the library if necessary
AC_CHECK_FUNCS(uuid_unparse)            dnl OSX ships uuid_* as standard functions
AC_CHECK_HEADERS(uuid/uuid.h)

if test "x$ac_cv_func_uuid_unparse" != xyes; then
    AC_MSG_ERROR(You do not have the libuuid development package installed)
fi

if test x"${PKGCONFIG}" = x""; then
    AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE})
fi

if $PKGCONFIG --exists glib-2.0
then
    GLIBCONFIG="$PKGCONFIG glib-2.0"
else
    set -x
    echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH
    $PKGCONFIG --exists glib-2.0; echo $?
    $PKGCONFIG --cflags glib-2.0; echo $?
    $PKGCONFIG glib-2.0; echo $?
    set +x
    AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE})
fi
AC_MSG_RESULT(using $GLIBCONFIG)

#
#    Where is dlopen?
#
if test "$ac_cv_lib_c_dlopen" = yes; then
    LIBADD_DL=""
elif test "$ac_cv_lib_dl_dlopen" = yes; then
    LIBADD_DL=-ldl
else
    LIBADD_DL=${lt_cv_dlopen_libs}
fi

if test "X$GLIBCONFIG" != X; then
    AC_MSG_CHECKING(for special glib includes: )
    GLIBHEAD=`$GLIBCONFIG --cflags`
    AC_MSG_RESULT($GLIBHEAD)
    CPPFLAGS="$CPPFLAGS $GLIBHEAD"

    AC_MSG_CHECKING(for glib library flags)
    GLIBLIB=`$GLIBCONFIG --libs`
    AC_MSG_RESULT($GLIBLIB)
    LIBS="$LIBS $GLIBLIB"
fi

dnl FreeBSD needs -lcompat for ftime() used by lrmd.c
AC_CHECK_LIB([compat], [ftime], [COMPAT_LIBS='-lcompat'])
AC_SUBST(COMPAT_LIBS)

dnl ========================================================================
dnl Headers
dnl ========================================================================

AC_HEADER_STDC
AC_CHECK_HEADERS(arpa/inet.h)
AC_CHECK_HEADERS(asm/types.h)
AC_CHECK_HEADERS(assert.h)
AC_CHECK_HEADERS(auth-client.h)
AC_CHECK_HEADERS(ctype.h)
AC_CHECK_HEADERS(dirent.h)
AC_CHECK_HEADERS(errno.h)
AC_CHECK_HEADERS(fcntl.h)
AC_CHECK_HEADERS(getopt.h)
AC_CHECK_HEADERS(glib.h)
AC_CHECK_HEADERS(grp.h)
AC_CHECK_HEADERS(limits.h)
AC_CHECK_HEADERS(linux/errqueue.h)
AC_CHECK_HEADERS(linux/swab.h)
AC_CHECK_HEADERS(malloc.h)
AC_CHECK_HEADERS(netdb.h)
AC_CHECK_HEADERS(netinet/in.h)
AC_CHECK_HEADERS(netinet/ip.h)
AC_CHECK_HEADERS(pam/pam_appl.h)
AC_CHECK_HEADERS(pthread.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(security/pam_appl.h)
AC_CHECK_HEADERS(sgtty.h)
AC_CHECK_HEADERS(signal.h)
AC_CHECK_HEADERS(stdarg.h)
AC_CHECK_HEADERS(stddef.h)
AC_CHECK_HEADERS(stdio.h)
AC_CHECK_HEADERS(stdlib.h)
AC_CHECK_HEADERS(string.h)
AC_CHECK_HEADERS(strings.h)
AC_CHECK_HEADERS(sys/dir.h)
AC_CHECK_HEADERS(sys/ioctl.h)
AC_CHECK_HEADERS(sys/param.h)
AC_CHECK_HEADERS(sys/poll.h)
AC_CHECK_HEADERS(sys/reboot.h)
AC_CHECK_HEADERS(sys/resource.h)
AC_CHECK_HEADERS(sys/select.h)
AC_CHECK_HEADERS(sys/socket.h)
AC_CHECK_HEADERS(sys/signalfd.h)
AC_CHECK_HEADERS(sys/sockio.h)
AC_CHECK_HEADERS(sys/stat.h)
AC_CHECK_HEADERS(sys/time.h)
AC_CHECK_HEADERS(sys/timeb.h)
AC_CHECK_HEADERS(sys/types.h)
AC_CHECK_HEADERS(sys/uio.h)
AC_CHECK_HEADERS(sys/un.h)
AC_CHECK_HEADERS(sys/utsname.h)
AC_CHECK_HEADERS(sys/wait.h)
AC_CHECK_HEADERS(time.h)
AC_CHECK_HEADERS(unistd.h)
AC_CHECK_HEADERS(winsock.h)

dnl These headers need prerequisites before the tests will pass
dnl AC_CHECK_HEADERS(net/if.h)
dnl AC_CHECK_HEADERS(netinet/icmp6.h)
dnl AC_CHECK_HEADERS(netinet/ip6.h)
dnl AC_CHECK_HEADERS(netinet/ip_icmp.h)
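dnl Illustrative only: every header found above becomes a HAVE_* define in
dnl include/config.h that the C sources can then test; a hypothetical use:
dnl
dnl     #ifdef HAVE_SYS_SIGNALFD_H
dnl     #  include <sys/signalfd.h>
dnl     #endif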
AC_MSG_CHECKING(for special libxml2 includes)
if test "x$XML2CONFIG" = "x"; then
    AC_MSG_ERROR(libxml2 config not found)
else
    XML2HEAD="`$XML2CONFIG --cflags`"
    AC_MSG_RESULT($XML2HEAD)
    AC_CHECK_LIB(xml2, xmlReadMemory)
    AC_CHECK_LIB(xslt, xsltApplyStylesheet)
fi

CPPFLAGS="$CPPFLAGS $XML2HEAD"

AC_CHECK_HEADERS(libxml/xpath.h)
AC_CHECK_HEADERS(libxslt/xslt.h)

if test "$ac_cv_header_libxml_xpath_h" != "yes"; then
    AC_MSG_ERROR(The libxml development headers were not found)
fi
if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
    AC_MSG_ERROR(The libxslt development headers were not found)
fi

dnl ========================================================================
dnl Structures
dnl ========================================================================

AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include <lrm/lrm_api.h>]])
AC_CHECK_MEMBER([struct dirent.d_type],
    AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
    [#include <dirent.h>])

dnl ========================================================================
dnl Functions
dnl ========================================================================

AC_CHECK_FUNCS(g_log_set_default_handler)
AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function]))
AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function]))

dnl ========================================================================
dnl ltdl
dnl ========================================================================

AC_CHECK_LIB(ltdl, lt_dlopen, [LTDL_foo=1])
if test "x${enable_bundled_ltdl}" = "xyes"; then
    if test $ac_cv_lib_ltdl_lt_dlopen = yes; then
        AC_MSG_NOTICE([Disabling usage of installed ltdl])
    fi
    ac_cv_lib_ltdl_lt_dlopen=no
fi

LIBLTDL_DIR=""
if test $ac_cv_lib_ltdl_lt_dlopen != yes ; then
    AC_MSG_NOTICE([Installing local ltdl])
    LIBLTDL_DIR=libltdl
    ( cd $srcdir ; $TAR -xvf libltdl.tar )
    if test "$?" -ne 0; then
        AC_MSG_ERROR([$TAR of libltdl.tar in $srcdir failed])
    fi
    AC_CONFIG_SUBDIRS(libltdl)
else
    LIBS="$LIBS -lltdl"
    AC_MSG_NOTICE([Using installed ltdl])
    INCLTDL=""
    LIBLTDL=""
fi
AC_SUBST(INCLTDL)
AC_SUBST(LIBLTDL)
AC_SUBST(LIBLTDL_DIR)

dnl ========================================================================
dnl bzip2
dnl ========================================================================
AC_CHECK_HEADERS(bzlib.h)
AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress)

if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then
    AC_MSG_ERROR(BZ2 libraries not found)
fi

if test x$ac_cv_header_bzlib_h != xyes; then
    AC_MSG_ERROR(BZ2 Development headers not found)
fi

dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================

AC_MSG_CHECKING([for sighandler_t])
AC_TRY_COMPILE([#include <signal.h>],[sighandler_t *f;],
               has_sighandler_t=yes,has_sighandler_t=no)
AC_MSG_RESULT($has_sighandler_t)
if test "$has_sighandler_t" = "yes" ; then
    AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] )
fi

dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS(curses.h)
AC_CHECK_HEADERS(curses/curses.h)
AC_CHECK_HEADERS(ncurses.h)
AC_CHECK_HEADERS(ncurses/ncurses.h)

dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
if test "$ac_cv_header_ncurses_h" = "yes"; then
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKGCONFIG --libs ncurses` || CURSESLIBS='-lncurses'
fi

if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKGCONFIG --libs ncurses` || CURSESLIBS='-lncurses'
fi

dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then
    AC_CHECK_LIB(curses, printw,
                 [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
fi

dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then
    AC_CHECK_LIB(curses, printw,
                 [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
fi

if test "x$CURSESLIBS" != "x"; then
    PCMK_FEATURES="$PCMK_FEATURES ncurses"
fi

dnl Check for printw() prototype compatibility
if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then
    ac_save_LIBS=$LIBS
    LIBS="$CURSESLIBS"
    ac_save_CFLAGS=$CFLAGS
    CFLAGS="-Wcast-qual -Werror"

    # avoid broken test because of hardened build environment in Fedora 23+
    # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
    # - https://bugzilla.redhat.com/1297985
    if cc_supports_flag -fPIC; then
        CFLAGS="$CFLAGS -fPIC"
    fi

    AC_MSG_CHECKING(whether printw() requires argument of "const char *")
    AC_LINK_IFELSE(
        [AC_LANG_PROGRAM(
            [
#if defined(HAVE_NCURSES_H)
#  include <ncurses.h>
#elif defined(HAVE_NCURSES_NCURSES_H)
#  include <ncurses/ncurses.h>
#elif defined(HAVE_CURSES_H)
#  include <curses.h>
#endif
            ],
            [printw((const char *)"Test");]
        )],
        [ac_cv_compatible_printw=yes],
        [ac_cv_compatible_printw=no]
    )

    LIBS=$ac_save_LIBS
    CFLAGS=$ac_save_CFLAGS

    AC_MSG_RESULT([$ac_cv_compatible_printw])

    if test "$ac_cv_compatible_printw" = no; then
        AC_MSG_WARN([The printw() function of your ncurses or curses library is old; use of the library will be disabled. If you want to use this library anyway, please update to a newer version; ncurses 5.4 or later is recommended.
You can get the library from http://www.gnu.org/software/ncurses/.])
        AC_MSG_NOTICE([Disabling curses])
        AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?])
    fi
fi

AC_SUBST(CURSESLIBS)

dnl ========================================================================
dnl Profiling and GProf
dnl ========================================================================

AC_MSG_NOTICE(Old CFLAGS: $CFLAGS)
case $SUPPORT_COVERAGE in
  1|yes|true)
    SUPPORT_PROFILING=1
    PCMK_FEATURES="$PCMK_FEATURES coverage"
    CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
    dnl During linking, make sure to specify -lgcov or -coverage
    dnl Enable gprof
    #LIBS="$LIBS -pg"
    #CFLAGS="$CFLAGS -pg"
    ;;
esac

case $SUPPORT_PROFILING in
  1|yes|true)
    SUPPORT_PROFILING=1

    dnl Disable various compiler optimizations
    CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin "
    dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls"

    dnl Turn off optimization so tools can get accurate line numbers
    CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'`
    CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2"

    dnl Update features
    PCMK_FEATURES="$PCMK_FEATURES profile"
    ;;
  *)
    SUPPORT_PROFILING=0
    ;;
esac
AC_MSG_NOTICE(New CFLAGS: $CFLAGS)
AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling)

dnl ========================================================================
dnl Cluster infrastructure - Heartbeat / LibQB
dnl ========================================================================

dnl Compatibility checks
AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include <lrm/lrm_api.h>]])

if test x${enable_no_stack} = xyes; then
    SUPPORT_HEARTBEAT=no
    SUPPORT_CS=no
fi

PKG_CHECK_MODULES(libqb, libqb >= 0.13)
CPPFLAGS="$libqb_CFLAGS $CPPFLAGS"
LIBS="$libqb_LIBS $LIBS"

AC_CHECK_HEADERS(qb/qbipc_common.h)
AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)

PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc"

AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size,
               AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcs_connection_get_buffer_size function]))

AC_CHECK_HEADERS(heartbeat/hb_config.h)
AC_CHECK_HEADERS(heartbeat/glue_config.h)
AC_CHECK_HEADERS(stonith/stonith.h)
AC_CHECK_HEADERS(agent_config.h)

GLUE_HEADER=none
HAVE_GLUE=0
if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then
    GLUE_HEADER=glue_config.h
    HAVE_GLUE=1
elif test "$ac_cv_header_heartbeat_hb_config_h" = "yes"; then
    GLUE_HEADER=hb_config.h
    HAVE_GLUE=1
else
    AC_MSG_WARN(cluster-glue development headers were not found)
fi

if test "$ac_cv_header_stonith_stonith_h" = "yes"; then
    PCMK_FEATURES="$PCMK_FEATURES lha-fencing"
fi

if test $HAVE_GLUE = 1; then
    dnl On Debian, AC_CHECK_LIB fails if a library has any unresolved symbols
    dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb
    AC_CHECK_LIB(pils, PILLoadPlugin)
    AC_CHECK_LIB(plumb, G_main_add_IPC_Channel)
fi

dnl ===============================================
dnl Variables needed for substitution
dnl ===============================================
CRM_DTD_DIRECTORY="${datadir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_DTD_DIRECTORY)

CRM_CORE_DIR=`try_extract_header_define $GLUE_HEADER HA_COREDIR ${localstatedir}/lib/pacemaker/cores`
AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons)
AC_SUBST(CRM_CORE_DIR)
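dnl Illustrative only: try_extract_header_define (defined earlier) prints its
dnl third argument when the header or define is absent, so on a glue-less
dnl system (GLUE_HEADER=none) the call above effectively reduces to:
dnl
dnl     CRM_CORE_DIR=${localstatedir}/lib/pacemaker/cores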
CRM_DAEMON_USER=`try_extract_header_define $GLUE_HEADER HA_CCMUSER hacluster`
AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_USER)

CRM_DAEMON_GROUP=`try_extract_header_define $GLUE_HEADER HA_APIGROUP haclient`
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_GROUP)

CRM_STATE_DIR=${localstatedir}/run/crm
AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets)
AC_SUBST(CRM_STATE_DIR)

CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
AC_SUBST(CRM_BLACKBOX_DIR)

PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs)
AC_SUBST(PE_STATE_DIR)

CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
AC_SUBST(CRM_CONFIG_DIR)

CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts"
AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data)
AC_SUBST(CRM_CONFIG_CTS)

CRM_LEGACY_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm"
AC_DEFINE_UNQUOTED(CRM_LEGACY_CONFIG_DIR,"$CRM_LEGACY_CONFIG_DIR", Where Pacemaker used to keep configuration files)
AC_SUBST(CRM_LEGACY_CONFIG_DIR)

CRM_DAEMON_DIR="${libexecdir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)

HB_DAEMON_DIR=`try_extract_header_define $GLUE_HEADER HA_LIBHBDIR $libdir/heartbeat`
AC_DEFINE_UNQUOTED(HB_DAEMON_DIR,"$HB_DAEMON_DIR", Location Heartbeat expects Pacemaker daemons to be in)
AC_SUBST(HB_DAEMON_DIR)

dnl Needed so that the Corosync plugin can clear out the directory as Heartbeat does
HA_STATE_DIR=`try_extract_header_define $GLUE_HEADER HA_VARRUNDIR ${localstatedir}/run`
AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets)
AC_SUBST(HA_STATE_DIR)

CRM_RSCTMP_DIR=`try_extract_header_define agent_config.h HA_RSCTMPDIR $HA_STATE_DIR/resource-agents`
AC_MSG_CHECKING(Scratch dir for resource agents)
AC_MSG_RESULT($CRM_RSCTMP_DIR)
AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
AC_SUBST(CRM_RSCTMP_DIR)

dnl Needed for the location of hostcache in CTS.py
HA_VARLIBHBDIR=`try_extract_header_define $GLUE_HEADER HA_VARLIBHBDIR ${localstatedir}/lib/heartbeat`
AC_SUBST(HA_VARLIBHBDIR)

AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file)

OCF_ROOT_DIR=`try_extract_header_define $GLUE_HEADER OCF_ROOT_DIR /usr/lib/ocf`
if test "X$OCF_ROOT_DIR" = X; then
    AC_MSG_ERROR(Could not locate OCF directory)
fi
AC_SUBST(OCF_ROOT_DIR)

OCF_RA_DIR=`try_extract_header_define $GLUE_HEADER OCF_RA_DIR $OCF_ROOT_DIR/resource.d`
AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs)
AC_SUBST(OCF_RA_DIR)

RH_STONITH_DIR="$sbindir"
AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents)

RH_STONITH_PREFIX="fence_"
AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents)

AC_PATH_PROGS(GIT, git false)

AC_MSG_CHECKING(build version)
BUILD_VERSION=$Format:%h$
if test $BUILD_VERSION != ":%h$"; then
    AC_MSG_RESULT(archive hash: $BUILD_VERSION)
elif test -x $GIT -a -d .git; then
    BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
    AC_MSG_RESULT(git hash: $BUILD_VERSION)
else
    # The current directory name makes a reasonable default
    # Most generated archives will include the hash or tag
    BASE=`basename $PWD`
    BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
    AC_MSG_RESULT(directory based hash: $BUILD_VERSION)
fi

AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)

HAVE_dbus=1
HAVE_upstart=0
HAVE_systemd=0
PKG_CHECK_MODULES(DBUS, dbus-1, ,HAVE_dbus=0)
AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
if test $HAVE_dbus = 1; then
    CFLAGS="$CFLAGS `$PKGCONFIG --cflags dbus-1`"
fi
DBUS_LIBS="$CFLAGS `$PKGCONFIG --libs dbus-1`"
AC_SUBST(DBUS_LIBS)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])

if test "x${enable_upstart}" != xno; then
    if test $HAVE_dbus = 0; then
        if test "x${enable_upstart}" = xyes; then
            AC_MSG_FAILURE([cannot enable Upstart without DBus])
        else
            enable_upstart=no
        fi
    fi
    if test "x${enable_upstart}" = xtry; then
        AC_MSG_CHECKING([for Upstart version query result via dbus-send])
        ret=$(dbus-send --system --print-reply --dest=com.ubuntu.Upstart \
              /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
              string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
              || echo "this borked")
        # sanitize output a bit (interested just in value, not type),
        # ret is intentionally unquoted so as to normalize whitespace
        ret=$(echo ${ret} | cut -d' ' -f2-)
        AC_MSG_RESULT([${ret}])
        if test "x${ret}" != xborked \
           || initctl --version 2>/dev/null | grep -q upstart; then
            enable_upstart=yes
        else
            enable_upstart=no
        fi
    fi
fi
AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
AC_MSG_RESULT([${enable_upstart}])
if test "x${enable_upstart}" = xyes; then
    HAVE_upstart=1
    PCMK_FEATURES="$PCMK_FEATURES upstart"
fi
AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services)
AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1)
AC_SUBST(SUPPORT_UPSTART)

if test "x${enable_systemd}" != xno; then
    if test $HAVE_dbus = 0; then
        if test "x${enable_systemd}" = xyes; then
            AC_MSG_FAILURE([cannot enable systemd without DBus])
        else
            enable_systemd=no
        fi
    else
        AC_MSG_CHECKING([for systemd path for system unit files])
        PKG_CHECK_VAR([systemdunitdir], [systemd], [systemdsystemunitdir], [
            AC_MSG_RESULT([${systemdunitdir}])
        ],[
            AC_MSG_RESULT([not found])
            systemdunitdir=no
        ])
        if test "x${systemdunitdir}" = xno; then
            if test "x${enable_systemd}" = xyes; then
                AC_MSG_FAILURE([cannot enable systemd when systemdunitdir unresolved])
            fi
            enable_systemd=no
        else
            enable_systemd=yes
        fi
    fi
fi
AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
AC_MSG_RESULT([${enable_systemd}])
if test "x${enable_systemd}" = xyes; then
    HAVE_systemd=1
    PCMK_FEATURES="$PCMK_FEATURES systemd"
fi
AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services)
AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1)
AC_SUBST(SUPPORT_SYSTEMD)

case $SUPPORT_NAGIOS in
  1|yes|true|try)
    SUPPORT_NAGIOS=1
    ;;
  *)
    SUPPORT_NAGIOS=0
    ;;
esac

if test $SUPPORT_NAGIOS = 1; then
    PCMK_FEATURES="$PCMK_FEATURES nagios"
fi
AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins)
AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1)

if test x"$NAGIOS_PLUGIN_DIR" = x""; then
    NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
fi
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
AC_SUBST(NAGIOS_PLUGIN_DIR)

if test x"$NAGIOS_METADATA_DIR" = x""; then
    NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"
fi
AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
AC_SUBST(NAGIOS_METADATA_DIR)
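dnl Illustrative only (hypothetical distro paths): both nagios directories can
dnl be overridden at configure time instead of relying on the defaults above:
dnl
dnl     ./configure --with-nagios \
dnl         --with-nagios-plugin-dir=/usr/lib64/nagios/plugins \
dnl         --with-nagios-metadata-dir=/usr/share/nagios/plugins-metadata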
STACKS=""
CLUSTERLIBS=""

dnl ========================================================================
dnl Cluster stack - Heartbeat
dnl ========================================================================

case $SUPPORT_HEARTBEAT in
  1|yes|true|try)
    AC_MSG_CHECKING(for heartbeat support)
    AC_CHECK_LIB(hbclient, ll_cluster_new,
                 [SUPPORT_HEARTBEAT=1],
                 [if test $SUPPORT_HEARTBEAT != try; then
                      AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found)
                  fi])

    if test $SUPPORT_HEARTBEAT = 1 ; then
        STACKS="$STACKS heartbeat"
        dnl objdump -x ${libdir}/libccmclient.so | grep SONAME | awk '{print $2}'
        AC_DEFINE_UNQUOTED(CCM_LIBRARY, "libccmclient.so.1", Library to load for ccm support)
        AC_DEFINE_UNQUOTED(HEARTBEAT_LIBRARY, "libhbclient.so.1", Library to load for heartbeat support)
        BUILD_ATOMIC_ATTRD=0
    else
        SUPPORT_HEARTBEAT=0
    fi
    ;;
  *)
    SUPPORT_HEARTBEAT=0
    ;;
esac

AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1)
AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer)
AC_SUBST(SUPPORT_HEARTBEAT)

dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================

dnl Normalize the values
case $SUPPORT_CS in
  1|yes|true)
    SUPPORT_CS=yes
    missingisfatal=1
    ;;
  try)
    missingisfatal=0
    ;;
  *)
    SUPPORT_CS=no
    ;;
esac

AC_MSG_CHECKING(for native corosync)
COROSYNC_LIBS=""
CS_USES_LIBQB=0
PCMK_SERVICE_ID=9

if test $SUPPORT_CS = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_CS=0
else
    AC_MSG_RESULT($SUPPORT_CS, with '$CSPREFIX')
    SUPPORT_CS=1
    PKG_CHECK_MODULES(cpg, libcpg)    dnl Fatal
    PKG_CHECK_MODULES(cfg, libcfg)    dnl Fatal
    PKG_CHECK_MODULES(cmap, libcmap, HAVE_cmap=1, HAVE_cmap=0)
    PKG_CHECK_MODULES(cman, libcman, HAVE_cman=1, HAVE_cman=0)
    PKG_CHECK_MODULES(confdb, libconfdb, HAVE_confdb=1, HAVE_confdb=0)
    PKG_CHECK_MODULES(fenced, libfenced, HAVE_fenced=1, HAVE_fenced=0)
    PKG_CHECK_MODULES(quorum, libquorum, HAVE_quorum=1, HAVE_quorum=0)
    PKG_CHECK_MODULES(oldipc, libcoroipcc, HAVE_oldipc=1, HAVE_oldipc=0)

    if test $HAVE_oldipc = 1; then
        CFLAGS="$CFLAGS $oldipc_FLAGS $cpg_FLAGS $cfg_FLAGS"
        COROSYNC_LIBS="$COROSYNC_LIBS $oldipc_LIBS $cpg_LIBS $cfg_LIBS"
    else
        CS_USES_LIBQB=1
        CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS"
        COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS"
        AC_CHECK_LIB(corosync_common, cs_strerror)
    fi
    AC_DEFINE_UNQUOTED(HAVE_CONFDB, $HAVE_confdb, Have the old hierarchical Corosync config API)
    AC_DEFINE_UNQUOTED(HAVE_CMAP, $HAVE_cmap, Have the new non-hierarchical Corosync config API)
fi

if test $SUPPORT_CS = 1 -a x$HAVE_oldipc = x0 ; then
    dnl Support for plugins was removed about the time the IPC was
    dnl moved to libqb.
    dnl The only option now is the built-in quorum API
    CFLAGS="$CFLAGS $cmap_CFLAGS $quorum_CFLAGS"
    COROSYNC_LIBS="$COROSYNC_LIBS $cmap_LIBS $quorum_LIBS"
    STACKS="$STACKS corosync-native"
    AC_DEFINE_UNQUOTED(SUPPORT_CS_QUORUM, 1, Support the consumption of membership and quorum from corosync)
fi

SUPPORT_PLUGIN=0
if test $SUPPORT_CS = 1 -a x$HAVE_confdb = x1; then
    dnl Need confdb to support cman and the plugins
    SUPPORT_PLUGIN=1
    BUILD_ATOMIC_ATTRD=0
    AC_MSG_CHECKING([for corosync path for plugins])
    PKG_CHECK_VAR([LCRSODIR], [corosync], [lcrsodir], [
        AC_MSG_RESULT([$LCRSODIR])
    ],[
        AC_SUBST([LCRSODIR], [$libdir])
        AC_MSG_RESULT([$LCRSODIR (fallback)])
    ])
    STACKS="$STACKS corosync-plugin"
    COROSYNC_LIBS="$COROSYNC_LIBS $confdb_LIBS"

    if test $SUPPORT_CMAN != no; then
        if test $HAVE_cman = 1 -a $HAVE_fenced = 1; then
            SUPPORT_CMAN=1
            STACKS="$STACKS cman"
            CFLAGS="$CFLAGS $cman_FLAGS $fenced_FLAGS"
            COROSYNC_LIBS="$COROSYNC_LIBS $cman_LIBS $fenced_LIBS"
        fi
    fi
fi

dnl Normalize SUPPORT_CS and SUPPORT_CMAN for use with #if directives
if test $SUPPORT_CMAN != 1; then
    SUPPORT_CMAN=0
fi

if test $SUPPORT_CS = 1; then
    CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
elif test $SUPPORT_CS != 0; then
    SUPPORT_CS=0
    if test $missingisfatal = 0; then
        AC_MSG_WARN(Unable to support Corosync: $aisreason)
    else
        AC_MSG_FAILURE(Unable to support Corosync: $aisreason)
    fi
fi

AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer)
AC_DEFINE_UNQUOTED(SUPPORT_CMAN, $SUPPORT_CMAN, Support the consumption of membership and quorum from cman)
AC_DEFINE_UNQUOTED(CS_USES_LIBQB, $CS_USES_LIBQB, Does corosync use libqb for its ipc)
AC_DEFINE_UNQUOTED(PCMK_SERVICE_ID, $PCMK_SERVICE_ID, Corosync service number)
AC_DEFINE_UNQUOTED(SUPPORT_PLUGIN, $SUPPORT_PLUGIN, Support the Pacemaker plugin for Corosync)

AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1)
AM_CONDITIONAL(BUILD_CS_PLUGIN, test $SUPPORT_PLUGIN = 1)
AM_CONDITIONAL(BUILD_CMAN, test $SUPPORT_CMAN = 1)

AM_CONDITIONAL(BUILD_ATOMIC_ATTRD, test $BUILD_ATOMIC_ATTRD = 1)
AC_DEFINE_UNQUOTED(HAVE_ATOMIC_ATTRD, $BUILD_ATOMIC_ATTRD, Support the new atomic attrd)

AC_SUBST(SUPPORT_CMAN)
AC_SUBST(SUPPORT_CS)
AC_SUBST(SUPPORT_PLUGIN)

dnl
dnl Cluster stack - Sanity
dnl

if test x${enable_no_stack} = xyes; then
    AC_MSG_NOTICE(No cluster stack supported.  Just building the Policy Engine)
    PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack"
else
    AC_MSG_CHECKING(for supported stacks)
    if test x"$STACKS" = x; then
        AC_MSG_FAILURE(You must support at least one cluster stack (heartbeat or corosync) )
    fi
    AC_MSG_RESULT($STACKS)
    PCMK_FEATURES="$PCMK_FEATURES $STACKS"
fi

if test ${BUILD_ATOMIC_ATTRD} = 1; then
    PCMK_FEATURES="$PCMK_FEATURES atomic-attrd"
fi

AC_SUBST(CLUSTERLIBS)

dnl ========================================================================
dnl SNMP
dnl ========================================================================

case $SUPPORT_SNMP in
  1|yes|true)
    missingisfatal=1
    ;;
  try)
    missingisfatal=0
    ;;
  *)
    SUPPORT_SNMP=no
    ;;
esac

SNMPLIBS=""

AC_MSG_CHECKING(for snmp support)
if test $SUPPORT_SNMP = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_SNMP=0
else
    SNMPCONFIG=""
    AC_MSG_RESULT($SUPPORT_SNMP)

    AC_CHECK_HEADERS(net-snmp/net-snmp-config.h)
    if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then
        SUPPORT_SNMP="no"
    fi

    if test $SUPPORT_SNMP != no; then
        AC_PATH_PROGS(SNMPCONFIG, net-snmp-config)
        if test "X${SNMPCONFIG}" = "X"; then
            AC_MSG_RESULT(You need the net_snmp development package to continue.)
            SUPPORT_SNMP=no
        fi
    fi

    if test $SUPPORT_SNMP != no; then
        AC_MSG_CHECKING(for special snmp libraries)
        SNMPLIBS=`$SNMPCONFIG --agent-libs`
        AC_MSG_RESULT($SNMPLIBS)
    fi

    if test $SUPPORT_SNMP != no; then
        savedLibs=$LIBS
        LIBS="$LIBS $SNMPLIBS"

        dnl On many systems libcrypto is needed when linking against libsnmp.
        dnl Check to see if it exists, and if so use it.
        dnl AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",)
        dnl AC_SUBST(CRYPTOLIB)

        AC_CHECK_FUNCS(netsnmp_transport_open_client)
        if test $ac_cv_func_netsnmp_transport_open_client != yes; then
            AC_CHECK_FUNCS(netsnmp_tdomain_transport)
            if test $ac_cv_func_netsnmp_tdomain_transport != yes; then
                SUPPORT_SNMP=no
            else
                AC_DEFINE_UNQUOTED(NETSNMPV53, 1, [Use the older 5.3 version of the net-snmp API])
            fi
        fi
        LIBS=$savedLibs
    fi

    if test $SUPPORT_SNMP = no; then
        SNMPLIBS=""
        SUPPORT_SNMP=0
        if test $missingisfatal = 0; then
            AC_MSG_WARN(Unable to support SNMP)
        else
            AC_MSG_FAILURE(Unable to support SNMP)
        fi
    else
        SUPPORT_SNMP=1
    fi
fi

if test $SUPPORT_SNMP = 1; then
    PCMK_FEATURES="$PCMK_FEATURES snmp"
fi

AC_SUBST(SNMPLIBS)
AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps)

dnl ========================================================================
dnl ESMTP
dnl ========================================================================

case $SUPPORT_ESMTP in
  1|yes|true)
    missingisfatal=1
    ;;
  try)
    missingisfatal=0
    ;;
  *)
    SUPPORT_ESMTP=no
    ;;
esac

ESMTPLIBS=""

AC_MSG_CHECKING(for esmtp support)
if test $SUPPORT_ESMTP = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_ESMTP=0
else
    ESMTPCONFIG=""
    AC_MSG_RESULT($SUPPORT_ESMTP)

    AC_CHECK_HEADERS(libesmtp.h)
    if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then
        SUPPORT_ESMTP="no"
    fi

    if test $SUPPORT_ESMTP != no; then
        AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config)
        if test "X${ESMTPCONFIG}" = "X"; then
            AC_MSG_RESULT(You need the libesmtp development package to continue.)
            SUPPORT_ESMTP=no
        fi
    fi

    if test $SUPPORT_ESMTP != no; then
        AC_MSG_CHECKING(for special esmtp libraries)
        ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '`
        AC_MSG_RESULT($ESMTPLIBS)
    fi

    if test $SUPPORT_ESMTP = no; then
        SUPPORT_ESMTP=0
        if test $missingisfatal = 0; then
            AC_MSG_WARN(Unable to support ESMTP)
        else
            AC_MSG_FAILURE(Unable to support ESMTP)
        fi
    else
        SUPPORT_ESMTP=1
        PCMK_FEATURES="$PCMK_FEATURES libesmtp"
    fi
fi

AC_SUBST(ESMTPLIBS)
AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP)

dnl ========================================================================
dnl ACL
dnl ========================================================================

case $SUPPORT_ACL in
  1|yes|true)
    missingisfatal=1
    ;;
  try)
    missingisfatal=0
    ;;
  *)
    SUPPORT_ACL=no
    ;;
esac

AC_MSG_CHECKING(for acl support)
if test $SUPPORT_ACL = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_ACL=0
else
    AC_MSG_RESULT($SUPPORT_ACL)
    SUPPORT_ACL=1
    AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)
    if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then
        SUPPORT_ACL=0
    fi

    if test $SUPPORT_ACL = 0; then
        if test $missingisfatal = 0; then
            AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0)
        else
            AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0)
        fi
    fi
fi

if test $SUPPORT_ACL = 1; then
    PCMK_FEATURES="$PCMK_FEATURES acls"
fi

AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL)

dnl ========================================================================
dnl CIB secrets
dnl ========================================================================

case $SUPPORT_CIBSECRETS in
  1|yes|true|try)
    SUPPORT_CIBSECRETS=1
    ;;
  *)
    SUPPORT_CIBSECRETS=0
    ;;
esac

AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets)
AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1)

if test $SUPPORT_CIBSECRETS = 1; then
    PCMK_FEATURES="$PCMK_FEATURES cibsecrets"

    LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
    AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets)
    AC_SUBST(LRM_CIBSECRETS_DIR)

    LRM_LEGACY_CIBSECRETS_DIR="${localstatedir}/lib/heartbeat/lrm/secrets"
    AC_DEFINE_UNQUOTED(LRM_LEGACY_CIBSECRETS_DIR,"$LRM_LEGACY_CIBSECRETS_DIR", Legacy location for CIB secrets)
    AC_SUBST(LRM_LEGACY_CIBSECRETS_DIR)
fi

dnl ========================================================================
dnl GnuTLS
dnl ========================================================================

AC_CHECK_HEADERS(gnutls/gnutls.h)
AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)

dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program.
dnl If no 'libgnutls-config', try traditional autoconf means.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
    AC_MSG_CHECKING(for gnutls header flags)
    GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
    AC_MSG_RESULT($GNUTLSHEAD)
    AC_MSG_CHECKING(for gnutls library flags)
    GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
    AC_MSG_RESULT($GNUTLSLIBS)
fi
AC_CHECK_LIB(gnutls, gnutls_init)
AC_CHECK_FUNCS(gnutls_priority_set_direct)
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)

dnl ========================================================================
dnl System Health
dnl ========================================================================

dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if $PKGCONFIG --exists $SERVICELOG
then
    PKG_CHECK_MODULES([SERVICELOG], [servicelog-1])
    SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")

dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if $PKGCONFIG --exists $OPENIPMI $SERVICELOG
then
    PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
    OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")
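dnl Illustrative only: the same pkg-config existence tests can be reproduced
dnl by hand when debugging a build:
dnl
dnl     pkg-config --exists servicelog-1 && echo "servicelog-1 found"
dnl     pkg-config --exists OpenIPMI OpenIPMIposix && echo "OpenIPMI found"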
dnl ========================================================================
dnl Compiler flags
dnl ========================================================================

dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to sub processes.
if export | fgrep " CFLAGS=" > /dev/null; then
    SAVED_CFLAGS="$CFLAGS"
    unset CFLAGS
    CFLAGS="$SAVED_CFLAGS"
    unset SAVED_CFLAGS
fi

AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])

CC_EXTRAS=""

if test "$GCC" != yes; then
    CFLAGS="$CFLAGS -g"
    enable_fatal_warnings=no
else
    CFLAGS="$CFLAGS -ggdb"

    dnl When we don't have diagnostic push / pull, we can't explicitly disable
    dnl checking for nonliteral formats in the places where they occur on
    dnl purpose, so we disable nonliteral format checking globally as we are
    dnl aborting on warnings.
    dnl What makes things really ugly is that nonliteral format checking is
    dnl available as an extra switch in very modern gcc, but for older gcc
    dnl it is part of -Wformat=2.
    dnl So: if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral;
    dnl if we don't have push/pull but do have -Wformat-nonliteral, we can
    dnl enable -Wformat=2; otherwise neither.
    gcc_diagnostic_push_pull=no
    SAVED_CFLAGS="$CFLAGS"
    CFLAGS="$CFLAGS -Werror"
    AC_MSG_CHECKING([for gcc diagnostic push / pull])
    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#pragma GCC diagnostic push
#pragma GCC diagnostic pop
                      ]])],
                      [
                       AC_MSG_RESULT([yes])
                       gcc_diagnostic_push_pull=yes
                      ],
                      AC_MSG_RESULT([no]))
    CFLAGS="$SAVED_CFLAGS"

    if cc_supports_flag "-Wformat-nonliteral"; then
        gcc_format_nonliteral=yes
    else
        gcc_format_nonliteral=no
    fi

    # We had to eliminate -Wnested-externs because of libtool changes
    EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char"

    if test "x$gcc_diagnostic_push_pull" = "xyes"; then
        AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [],
                  [gcc can complain about nonliterals in format])
        EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral"
    else
        if test "x$gcc_format_nonliteral" = "xyes"; then
            EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"
        fi
    fi

    # Additional warnings it might be nice to enable one day
    #   -Wshadow
    #   -Wunreachable-code

    for j in $EXTRA_FLAGS
    do
        if cc_supports_flag $j
        then
            CC_EXTRAS="$CC_EXTRAS $j"
        fi
    done

    dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x
    GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'`
    AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4)

    dnl System specific options
    case "$host_os" in
      *linux*|*bsd*)
        if test "${enable_fatal_warnings}" = "unknown"; then
            enable_fatal_warnings=yes
        fi
        ;;
    esac

    if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then
        enable_fatal_warnings=yes
    else
        enable_fatal_warnings=no
    fi

    if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then
        AC_MSG_NOTICE(Enabling ANSI Compatibility)
        CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
    fi

    AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi
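dnl Illustrative only (hypothetical source fragment): the push/pull capability
dnl probed above is what lets individual call sites opt out of
dnl -Wformat-nonliteral while the warning stays globally enabled:
dnl
dnl     #pragma GCC diagnostic push
dnl     #pragma GCC diagnostic ignored "-Wformat-nonliteral"
dnl     vsnprintf(buf, sizeof(buf), fmt, ap);  /* deliberately nonliteral */
dnl     #pragma GCC diagnostic pop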
dnl
dnl Hardening flags
dnl
dnl The prime control of whether to apply (targeted) hardening build flags and
dnl which ones is the --{enable,disable}-hardening option passed to ./configure:
dnl
dnl --enable-hardening=try (default):
dnl     depending on whether any of the CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
dnl     CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables
dnl     (see below) is set and non-null, all these custom flags (even if not
dnl     set) are used as-is; otherwise the best effort is made to offer
dnl     reasonably strong hardening in several categories (RELRO, PIE,
dnl     "bind now", stack protector) according to what the selected toolchain
dnl     can offer
dnl
dnl --enable-hardening:
dnl     same effect as --enable-hardening=try when the environment variables
dnl     in question are suppressed
dnl
dnl --disable-hardening:
dnl     do not apply any targeted hardening measures at all
dnl
dnl The user-injected environment variables that regulate the hardening in
dnl the default case are as follows:
dnl
dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
dnl     compiler and linker flags (respectively) for daemon programs
dnl     (attrd, cib, crmd, lrmd, stonithd, pacemakerd, pacemaker_remoted,
dnl     pengine)
dnl
dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
dnl     compiler and linker flags (respectively) for libraries linked
dnl     with the daemon programs
dnl
dnl Note that these are purposely targeted variables (addressing particular
dnl targets all over the scattered Makefiles) and have no effect outside of
dnl the predestined scope (e.g., CLI utilities).  For a global reach,
dnl use CFLAGS, LDFLAGS, etc. as usual.
dnl
dnl For guidance on suitable flags consult, for instance:
dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
dnl

if test "x${HARDENING}" != "xtry"; then
    unset CFLAGS_HARDENED_EXE
    unset CFLAGS_HARDENED_LIB
    unset LDFLAGS_HARDENED_EXE
    unset LDFLAGS_HARDENED_LIB
fi

if test "x${HARDENING}" = "xno"; then
    AC_MSG_NOTICE([Hardening: explicitly disabled])
elif test "x${HARDENING}" = "xyes" \
     || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then
    dnl We'll figure out on our own...
    CFLAGS_HARDENED_EXE=
    CFLAGS_HARDENED_LIB=
    LDFLAGS_HARDENED_EXE=
    LDFLAGS_HARDENED_LIB=
    relro=0
    pie=0
    bindnow=0

    # daemons incl. libs: partial RELRO
    flag="-Wl,-z,relro"
    CC_CHECK_LDFLAGS(["${flag}"],
                     [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
                      LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}";
                      relro=1])

    # daemons: PIE for both CFLAGS and LDFLAGS
    if cc_supports_flag -fPIE; then
        flag="-pie"
        CC_CHECK_LDFLAGS(["${flag}"],
                         [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE";
                          LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
                          pie=1])
    fi
libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1] ) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"] ) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE( [Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... 
AC_SUBST(LOCALE)

dnl Options for cleaning up the compiler output
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
if test "x${enable_quiet}" = "xyes"; then
    QUIET_LIBTOOL_OPTS="--quiet"
    QUIET_MAKE_OPTS="--quiet"
fi

AC_MSG_RESULT(Suppress make details: ${enable_quiet})

dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKE="${MAKE} \$(QUIET_MAKE_OPTS)"

AC_SUBST(CC)
AC_SUBST(MAKE)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_MAKE_OPTS)
AC_SUBST(QUIET_LIBTOOL_OPTS)

AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
AC_SUBST(PCMK_FEATURES)

dnl The Makefiles and shell scripts we output
AC_CONFIG_FILES(Makefile \
cts/Makefile \
Doxyfile \
coverage.sh \
cts/CTSvars.py \
cts/LSBDummy \
cts/HBDummy \
cts/benchmark/Makefile \
cts/benchmark/clubench \
cts/lxc_autogen.sh \
cib/Makefile \
attrd/Makefile \
crmd/Makefile \
pengine/Makefile \
pengine/regression.core.sh \
doc/Makefile \
doc/Clusters_from_Scratch/publican.cfg \
doc/Pacemaker_Development/publican.cfg \
doc/Pacemaker_Explained/publican.cfg \
doc/Pacemaker_Remote/publican.cfg \
include/Makefile \
include/crm/Makefile \
include/crm/cib/Makefile \
include/crm/common/Makefile \
include/crm/cluster/Makefile \
include/crm/fencing/Makefile \
include/crm/pengine/Makefile \
replace/Makefile \
lib/Makefile \
lib/pacemaker.pc \
lib/pacemaker-cib.pc \
lib/pacemaker-lrmd.pc \
lib/pacemaker-service.pc \
lib/pacemaker-pengine.pc \
lib/pacemaker-fencing.pc \
lib/pacemaker-cluster.pc \
lib/ais/Makefile \
lib/common/Makefile \
lib/cluster/Makefile \
lib/cib/Makefile \
lib/pengine/Makefile \
lib/transition/Makefile \
lib/fencing/Makefile \
lib/lrmd/Makefile \
lib/services/Makefile \
mcp/Makefile \
mcp/pacemaker \
mcp/pacemaker.service \
mcp/pacemaker.upstart \
mcp/pacemaker.combined.upstart \
fencing/Makefile \
fencing/regression.py \
lrmd/Makefile \
lrmd/regression.py \
lrmd/pacemaker_remote.service \
lrmd/pacemaker_remote \
extra/Makefile \
extra/alerts/Makefile \
extra/resources/Makefile \
extra/logrotate/Makefile \
extra/logrotate/pacemaker \
tools/Makefile \
tools/crm_report \
tools/report.common \
tools/cibsecret \
tools/crm_mon.service \
tools/crm_mon.upstart \
xml/Makefile \
lib/gnu/Makefile \
)

dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()

dnl *****************
dnl Configure summary
dnl *****************

AC_MSG_RESULT([])
AC_MSG_RESULT([$PACKAGE configuration:])
AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_RESULT([ Features =${PCMK_FEATURES}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ Prefix = ${prefix}])
AC_MSG_RESULT([ Executables = ${sbindir}])
AC_MSG_RESULT([ Man pages = ${mandir}])
AC_MSG_RESULT([ Libraries = ${libdir}])
AC_MSG_RESULT([ Header files = ${includedir}])
AC_MSG_RESULT([ Arch-independent files = ${datadir}])
AC_MSG_RESULT([ State information = ${localstatedir}])
AC_MSG_RESULT([ System configuration = ${sysconfdir}])
if test $SUPPORT_PLUGIN = 1; then
    AC_MSG_RESULT([ Corosync Plugins = ${LCRSODIR}])
fi
AC_MSG_RESULT([])
AC_MSG_RESULT([ Use system LTDL = ${ac_cv_lib_ltdl_lt_dlopen}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}])
AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ CFLAGS = ${CFLAGS}])
AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}])
AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}])
AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}])
AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}])
AC_MSG_RESULT([
Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/doc/Pacemaker_Development/en-US/Ch-FAQ.txt b/doc/Pacemaker_Development/en-US/Ch-FAQ.txt index 490b46b06f..10be0e0fe3 100644 --- a/doc/Pacemaker_Development/en-US/Ch-FAQ.txt +++ b/doc/Pacemaker_Development/en-US/Ch-FAQ.txt @@ -1,112 +1,111 @@ = Frequently Asked Questions = [qanda] Who is this document intended for?:: Anyone who wishes to read and/or edit the Pacemaker source code. Casual contributors should feel free to read just this FAQ, and consult other chapters as needed. Where is the source code for Pacemaker?:: indexterm:[downloads] indexterm:[source code] indexterm:[git,GitHub] The https://github.com/ClusterLabs/pacemaker[source code for Pacemaker] is kept on https://github.com/[GitHub], as are all software projects under the https://github.com/ClusterLabs[ClusterLabs] umbrella. Pacemaker uses https://git-scm.com/[Git] for source code management. If you are a Git newbie, the http://schacon.github.io/git/gittutorial.html[gittutorial(7) man page] is an excellent starting point. If you're familiar with using Git from the command line, you can create a local copy of the Pacemaker source code with: `git clone https://github.com/ClusterLabs/pacemaker.git pacemaker` What are the different Git branches and repositories used for?:: indexterm:[branches] * The https://github.com/ClusterLabs/pacemaker/tree/master[master branch] is the primary branch used for development. * The https://github.com/ClusterLabs/pacemaker/tree/1.1[1.1 branch] contains the latest official release, and normally does not receive any changes. During the release cycle, it will contain release candidates for the next official release, and will receive only bug fixes. * The https://github.com/ClusterLabs/pacemaker-1.0[1.0 repository] is a frozen snapshot of the 1.0 release series, and is no longer developed. * Messages will be posted to the http://clusterlabs.org/mailman/listinfo/developers[developers@clusterlabs.org] mailing list during the release cycle, with instructions about which branches to use when submitting requests. How do I build from the source code?:: - See the - https://github.com/ClusterLabs/pacemaker/blob/master/README.markdown[README] + See https://github.com/ClusterLabs/pacemaker/blob/master/INSTALL.md[INSTALL.md] in the main checkout directory. What coding style should I follow?:: You'll be mostly fine if you simply follow the example of existing code. When unsure, see the relevant chapter of this document for language-specific recommendations. Pacemaker has grown and evolved organically over many years, so you will see much code that doesn't conform to the current guidelines. We discourage making changes solely to bring code into conformance, as any change requires developer time for review and opens the possibility of adding bugs. However, new code should follow the guidelines, and it is fine to bring lines of older code into conformance when modifying that code for other reasons. How should I format my Git commit messages?:: indexterm:[git,commit messages] See existing examples in the git log. The first line should look like +change-type: affected-code: explanation+ where +change-type+ can be +Fix+ or +Bug+ for most bug fixes, +Feature+ for new features, +Log+ for changes to log messages or handling, +Doc+ for changes to documentation or comments, or +Test+ for changes in CTS and regression tests. You will sometimes see +Low+, +Med+ (or +Mid+) and +High+ used instead for bug fixes, to indicate the severity. 
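For example, a plausible summary line for a bug fix (the component and
description here are invented for illustration) would be:

----
Fix: libcrmcommon: don't leak memory when a CIB query fails
----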
The important thing is that only commits with +Feature+, +Fix+, +Bug+, or +High+ will automatically be included in the change log for the next release. The +affected-code+ is the name of the component(s) being changed, for example, +crmd+ or +libcrmcommon+ (it's more free-form, so don't sweat getting it exact). The +explanation+ briefly describes the change. The git project recommends the entire summary line stay under 50 characters, but more is fine if needed for clarity. Except for the most simple and obvious of changes, the summary should be followed by a blank line and then a longer explanation of 'why' the change was made. How can I test my changes?:: Most importantly, Pacemaker has regression tests for most major components; these will automatically be run for any pull requests submitted through GitHub. Additionally, Pacemaker's Cluster Test Suite (CTS) can be used to set up a test cluster and run a wide variety of complex tests. This document will have more detail on testing in the future. What is Pacemaker's license?:: indexterm:[licensing] Except where noted otherwise in the file itself, the source code for all Pacemaker programs is licensed under version 2 or later of the GNU General Public License (https://www.gnu.org/licenses/gpl-2.0.html[GPLv2+]), its headers and libraries under version 2.1 or later of the less restrictive GNU Lesser General Public License (https://www.gnu.org/licenses/lgpl-2.1.html[LGPLv2.1+]), its documentation under version 4.0 or later of the Creative Commons Attribution-ShareAlike International Public License (https://creativecommons.org/licenses/by-sa/4.0/legalcode[CC-BY-SA]), and its init scripts under the https://opensource.org/licenses/BSD-3-Clause[Revised BSD] license. If you find any deviations from this policy, or wish to inquire about alternate licensing - arrangements, please e-mail pacemaker@oss.clusterlabs.org. + arrangements, please e-mail mailto:andrew@beekhof.net[andrew@beekhof.net]. Licensing issues are also discussed on the http://clusterlabs.org/wiki/License[ClusterLabs wiki]. How can I contribute my changes to the project?:: Contributions of bug fixes or new features are very much appreciated! Patches can be submitted as https://help.github.com/articles/using-pull-requests/[pull requests] via GitHub (the preferred method, due to its excellent https://github.com/features/[features]), or e-mailed to the http://clusterlabs.org/mailman/listinfo/developers[developers@clusterlabs.org] mailing list as an attachment in a format Git can import. What if I still have questions?:: indexterm:[mailing lists] Ask on the http://clusterlabs.org/mailman/listinfo/developers[developers@clusterlabs.org] mailing list for development-related questions, or on the http://clusterlabs.org/mailman/listinfo/users[users@clusterlabs.org] mailing list for general questions about using Pacemaker. Developers often also hang out on http://freenode.net/[freenode's] #clusterlabs IRC channel. diff --git a/doc/Pacemaker_Explained/en-US/Ap-Changes.txt b/doc/Pacemaker_Explained/en-US/Ap-Changes.txt deleted file mode 100644 index 3626753de6..0000000000 --- a/doc/Pacemaker_Explained/en-US/Ap-Changes.txt +++ /dev/null @@ -1,63 +0,0 @@ -[appendix] - - -== What Changed in 1.0 == - -=== New === - -* Failure timeouts. See <> -* New section for resource and operation defaults. See <> and <> -* Tool for making offline configuration changes. See <> -* +Rules, instance_attributes, meta_attributes+ and sets of operations can be defined once and referenced in multiple places. 
See <> -* The CIB now accepts XPath-based create/modify/delete operations. See the pass:[cibadmin] help text. -* Multi-dimensional colocation and ordering constraints. See <> and <> -* The ability to connect to the CIB from non-cluster machines. See <> -* Allow recurring actions to be triggered at known times. See <> - - -=== Changed === - -* Syntax -** All resource and cluster options now use dashes (-) instead of underscores (_) -** +master_slave+ was renamed to +master+ -** The +attributes+ container tag was removed -** The operation field +pre-req+ has been renamed +requires+ -** All operations must have an +interval+, +start+/+stop+ must have it set to zero -* The +stonith-enabled+ option now defaults to true. -* The cluster will refuse to start resources if +stonith-enabled+ is true (or unset) and no STONITH resources have been defined -* The attributes of colocation and ordering constraints were renamed for clarity. See <> and <> -* +resource-failure-stickiness+ has been replaced by +migration-threshold+. See <> -* The parameters for command-line tools have been made consistent -* Switched to 'RelaxNG' schema validation and 'libxml2' parser -** id fields are now XML IDs which have the following limitations: -*** id's cannot contain colons (:) -*** id's cannot begin with a number -*** id's must be globally unique (not just unique for that tag) -** Some fields (such as those in constraints that refer to resources) are IDREFs. -+ -This means that they must reference existing resources or objects in -order for the configuration to be valid. Removing an object which is -referenced elsewhere will therefore fail. -+ -** The CIB representation, from which a MD5 digest is calculated to verify CIBs on the nodes, has changed. -+ -This means that every CIB update will require a full refresh on any -upgraded nodes until the cluster is fully upgraded to 1.0. This will -result in significant performance degradation and it is therefore -highly inadvisable to run a mixed 1.0/0.6 cluster for any longer than -absolutely necessary. -+ -* Ping node information no longer needs to be added to _ha.cf_. -+ -Simply include the lists of hosts in your ping resource(s). - - -=== Removed === - - -* Syntax -** It is no longer possible to set resource meta options as top-level - attributes. Use meta attributes instead. -** Resource and operation defaults are no longer read from - +crm_config+. See <> and - <> instead. diff --git a/doc/Pacemaker_Explained/en-US/Ap-Upgrade-Config.txt b/doc/Pacemaker_Explained/en-US/Ap-Upgrade-Config.txt deleted file mode 100644 index 7f1eb06f37..0000000000 --- a/doc/Pacemaker_Explained/en-US/Ap-Upgrade-Config.txt +++ /dev/null @@ -1,130 +0,0 @@ -[appendix] - -== Upgrading the Configuration == - -This process was originally written for the upgrade from 0.6.'x' to 1.'y', -but the concepts should apply for any upgrade involving a change in -the XML schema version. - -indexterm:[Upgrading the Configuration] -indexterm:[Configuration,Upgrading] - -=== Perform the upgrade === - -==== Upgrade the software ==== - -Refer to the appendix: <> - -==== Upgrade the Configuration ==== - -As XML is not the friendliest of languages, it is common for cluster -administrators to have scripted some of their activities. In such -cases, it is likely that those scripts will not work with the new XML -syntax. - -In order to support such environments, it is actually possible to -continue using the old XML syntax. 
- -The downside is, however, that not all the new features will be -available and there is a performance impact since the cluster must do -a non-persistent configuration upgrade before each transition. So -while using the old syntax is possible, it is not advisable to -continue using it indefinitely. - -Even if you wish to continue using the old syntax, it is advisable to -follow the upgrade procedure (except for the last step) to ensure that the -cluster is able to use your existing configuration (since it will perform much -the same task internally). - -. Create a shadow copy to work with -+ ------ -# crm_shadow --create upgrade06 ------ -. Verify the configuration is valid indexterm:[Configuration,Verify]indexterm:[Verify,Configuration] -+ ------ -# crm_verify --live-check ------ -. Fix any errors or warnings -. Perform the upgrade: -+ ------ -# cibadmin --upgrade ------ -. If this step fails, there are three main possibilities: -.. The configuration was not valid to start with - go back to step 2 -.. The transformation failed - report a bug or mailto:pacemaker@oss.clusterlabs.org?subject=Transformation%20failed%20during%20upgrade[email the project] -.. The transformation was successful but produced an invalid result footnote:[ -The most common reason is ID values being repeated or invalid. Pacemaker 1.0 is much stricter regarding this type of validation. -] -+ -If the result of the transformation is invalid, you may see a number of errors -from the validation library. If these are not helpful, visit the -http://clusterlabs.org/wiki/Validation_FAQ[Validation FAQ wiki page] and/or try -the procedure described below under <> -+ -. Check the changes -+ ------ -# crm_shadow --diff ------ -+ -If at this point there is anything about the upgrade that you wish to fine-tune (for example, to change some of the automatic IDs) now is the time to do so. Since the shadow configuration is not in use by the cluster, it is safe to edit the file manually: -+ ------ -# crm_shadow --edit ------ -+ -This will open the configuration in your favorite editor (whichever is -specified by the standard *$EDITOR* environment variable) -+ -. Preview how the cluster will react: -+ ------- -# crm_simulate --live-check --save-dotfile upgrade06.dot -S -# graphviz upgrade06.dot ------- -+ -Verify that either no resource actions will occur or that you are -happy with any that are scheduled. If the output contains actions you -do not expect (possibly due to changes to the score calculations), you -may need to make further manual changes. See -<> for further details on how to interpret -the output of `crm_simulate` and `graphviz`. -+ -. Upload the changes -+ ------ -# crm_shadow --commit upgrade06 --force ------ -+ -In the unlikely event this step fails, please report a bug. - -[[s-upgrade-config-manual]] -==== Manually Upgrading the Configuration ==== - -indexterm:[Configuration,Upgrade manually] -It is also possible to perform the configuration upgrade steps manually: - -. Locate the +upgrade06.xsl+ conversion script provided with the source code - (the https://github.com/ClusterLabs/pacemaker/tree/master/xml/upgrade06.xsl[latest version] is available via - git). - -. Convert the XML blob: indexterm:[XML,Convert] -+ ------ -# xsltproc /path/to/upgrade06.xsl config06.xml > config10.xml ------ -+ -. Locate the +pacemaker.rng+ script. -. 
Check the XML validity: indexterm:[Validate Configuration]indexterm:[Configuration,Validate XML] -+ ----- -# xmllint --relaxng /path/to/pacemaker.rng config10.xml ----- - -The advantage of this method is that it can be performed without the -cluster running and any validation errors should be more informative -(despite being generated by the same library!) since they include line -numbers. diff --git a/doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt b/doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt index 66f5cc5936..f36f8159a3 100644 --- a/doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt +++ b/doc/Pacemaker_Explained/en-US/Ap-Upgrade.txt @@ -1,193 +1,420 @@ [appendix] -[[ap-upgrade]] -== Upgrading Cluster Software == - -There will always be an upgrade path from any pacemaker 1._x_ -release to any other 1._y_ release. - -Consult the documentation for your messaging layer -(Heartbeat or Corosync) to see whether upgrading them to a -newer version is also supported. +== Upgrading == -There are three approaches to upgrading your cluster software: - -* Complete Cluster Shutdown -* Rolling (node by node) -* Disconnect and Reattach +[[ap-upgrade]] +=== Upgrading Cluster Software === -Each method has advantages and disadvantages, some of which are listed -in the table below, and you should choose the one most appropriate to -your needs. +There are three approaches to upgrading a cluster, each with advantages and +disadvantages. .Upgrade Methods -[width="95%",cols="6*",options="header",align="center"] +[width="95%",cols="s,6*",options="header",align="center"] |========================================================= -|Type -|Available between all software versions -|Service Outage During Upgrade -|Service Recovery During Upgrade -|Exercises Failover Logic/Configuration -|Allows change of cluster stack type -indexterm:[Cluster,Switching between Stacks] -indexterm:[Changing Cluster Stack] +|Method +|Available between all versions +|Can be used with Pacemaker Remote nodes +|Service outage during upgrade +|Service recovery during upgrade +|Exercises failover logic +|Allows change of messaging layer +indexterm:[Cluster,switching between stacks] +indexterm:[Changing cluster stack] footnote:[For example, switching from Heartbeat to Corosync.] -|Shutdown -indexterm:[Upgrade,Shutdown] -indexterm:[Shutdown Upgrade] +|Complete cluster shutdown +indexterm:[upgrade,shutdown] +indexterm:[shutdown upgrade] +|yes |yes |always |N/A |no |yes -|Rolling -indexterm:[Upgrade,Rolling] -indexterm:[Rolling Upgrade] +|Rolling (node by node) +indexterm:[upgrade,rolling] +indexterm:[rolling upgrade] |no +|yes |always |yes |yes |no -|Reattach -indexterm:[Upgrade,Reattach] -indexterm:[Reattach Upgrade] +|Detach and reattach +indexterm:[upgrade,reattach] +indexterm:[reattach upgrade] |yes +|no |only due to failure |no |no |yes |========================================================= -=== Complete Cluster Shutdown === +==== Complete Cluster Shutdown ==== In this scenario, one shuts down all cluster nodes and resources, then upgrades all the nodes before restarting the cluster. . On each node: .. Shutdown the cluster software (pacemaker and the messaging layer). .. Upgrade the Pacemaker software. This may also include upgrading the messaging layer and/or the underlying operating system. -.. Check the configuration manually or with the `crm_verify` tool if available. +.. Check the configuration with the `crm_verify` tool. . On each node: .. Start the cluster software. 
The messaging layer can be either Corosync or Heartbeat and does not need to
be the same one as before the upgrade.

-=== Rolling (node by node) ===
+One variation of this approach is to build a new cluster on new hosts.
+This allows the new version to be tested beforehand, and minimizes downtime by
+having the new nodes ready to be placed in production as soon as the old nodes
+are shut down.
+
+==== Rolling (node by node) ====

-In this scenario, each node is removed from the cluster, upgraded and then
-brought back online until all nodes are running the newest version.
+In this scenario, each node is removed from the cluster, upgraded, and then
+brought back online, until all nodes are running the newest version.

-Rolling upgrades should always be possible for pacemaker versions
-1.0.0 and later.
+If you plan to upgrade other cluster software -- such as the messaging layer --
+at the same time, consult that software's documentation for its compatibility
+with a rolling upgrade.
+
+Pacemaker has three version numbers that affect rolling upgrades:
+
+* *Pacemaker release version:* Rolling upgrades are possible as long as the
+ major version number (the _x_ in _x.y.z_) stays the same. For example,
+ a rolling upgrade may be done from 1.0.8 to 1.1.15, but not from
+ 0.6.7 to 1.0.0.
+
+* *CRM feature set:* This version number applies to the communication between
+ full cluster nodes.
++
+It increases when a cluster node running the older version would have
+problems if the cluster's Designated Controller (DC) has the newer version.
+To avoid these problems, Pacemaker ensures that the longest-running node is the
+DC, and that nodes with an older feature set cannot join the cluster.
++
+Therefore, if the CRM feature set is changing in the Pacemaker version you
+are upgrading to, you should run a mixed-version cluster only during a small
+rolling upgrade window. Otherwise, if one of the older nodes drops out of the
+cluster for any reason, it will not be able to rejoin until it is upgraded.
+
+* *LRMD protocol version:* This version number applies to communication between a
+ Pacemaker Remote node and the cluster. It increases when an older cluster
+ node would have problems hosting the connection to a newer Pacemaker Remote
+ node. To avoid these problems, Pacemaker Remote nodes will accept connections
+ only from cluster nodes with the same or newer LRMD protocol version.
++
+For rolling upgrades, this means that all cluster nodes should be upgraded
+before upgrading any Pacemaker Remote nodes.
++
+Unlike with CRM feature set differences between full cluster nodes,
+mixed LRMD protocol versions between Pacemaker Remote nodes and full cluster
+nodes are fine, as long as the Pacemaker Remote nodes have the older version.
+This can be useful, for example, to host a legacy application in an
+older operating system version used as a Pacemaker Remote node.
+
+See the ClusterLabs wiki's
+http://clusterlabs.org/wiki/ReleaseCalendar[Release Calendar] to figure out
+whether the CRM feature set and/or LRMD protocol version changed between
+the Pacemaker release versions in your rolling upgrade.
+
+[WARNING]
+====
+The interpretation of the LRMD protocol version changed in Pacemaker 1.1.15.
+If you are planning a rolling upgrade from an earlier Pacemaker version to
+Pacemaker 1.1.15 or later involving Pacemaker Remote nodes, you will need to
+take special precautions to avoid problems.
See +http://clusterlabs.org/wiki/Upgrading_to_Pacemaker_1.1.15_or_later_from_an_earlier_version[Upgrading +to Pacemaker 1.1.15 or later from an earlier version] on the ClusterLabs wiki. +==== -On each node: +To perform a rolling upgrade, on each node in turn: . Put the node into standby mode, and wait for any active resources - to be moved cleanly to another node. + to be moved cleanly to another node. (This step is optional, but + allows you to deal with any resource issues before the upgrade.) . Shutdown the cluster software (pacemaker and the messaging layer) on the node. . Upgrade the Pacemaker software. This may also include upgrading the messaging layer and/or the underlying operating system. -. If this is the first node to be upgraded, check the configuration manually - or with the `crm_verify` tool if available. +. If this is the first node to be upgraded, check the configuration + with the `crm_verify` tool. . Start the messaging layer. This must be the same messaging layer (Corosync or Heartbeat) - that the rest of the cluster is using. Upgrading the messaging layer - may also be possible; consult the documentation for those - projects to see whether the two versions will be compatible. + that the rest of the cluster is using. [NOTE] ==== Rolling upgrades were not always possible with older heartbeat and -pacemaker versions. The table below shows which versions were -compatible during rolling upgrades. Rolling upgrades that cross compatibility -boundaries must be performed in multiple steps (for example, -upgrading heartbeat 2.0.6 to heartbeat 2.1.3, and then upgrading again -to pacemaker 0.6.6). Rolling upgrades from pacemaker 0._x_ to 1._y_ are not -possible. +pacemaker versions. Rolling upgrades that cross compatibility +boundaries listed in the following table must be performed in multiple steps. .Version Compatibility Table [width="95%",cols="2*",options="header",align="center"] |========================================================= |Version being Installed |Oldest Compatible Version -|Pacemaker 1.0.x +|Pacemaker 1.x.y |Pacemaker 1.0.0 |Pacemaker 0.7.x |Pacemaker 0.6 or Heartbeat 2.1.3 |Pacemaker 0.6.x |Heartbeat 2.0.8 |Heartbeat 2.1.3 (or less) |Heartbeat 2.0.4 |Heartbeat 2.0.4 (or less) |Heartbeat 2.0.0 |Heartbeat 2.0.0 |None. Use an alternate upgrade strategy. |========================================================= ==== -=== Disconnect and Reattach === +==== Detach and Reattach ==== The reattach method is a variant of a complete cluster shutdown, where the resources are left active and get re-detected when the cluster is restarted. +This method may not be used if the cluster contains any Pacemaker Remote nodes. + . Tell the cluster to stop managing services. This is required to allow the services to remain active after the cluster shuts down. + ---- -# crm_attribute -t crm_config -n is-managed-default -v false +# crm_attribute --type rsc_defaults --name is-managed --update false ---- . For any resource that has a value for +is-managed+, make sure it is set to +false+ so that the cluster will not stop it (replacing $rsc_id appropriately): + ---- # crm_resource -t primitive -r $rsc_id -p is-managed -v false ---- -. On each node: -.. Shutdown the cluster software (pacemaker and the messaging layer). -.. Upgrade the Pacemaker software. This may also include upgrading the - messaging layer and/or the underlying operating system. -. Check the configuration manually or with the `crm_verify` tool if available. -. On each node: -.. Start the cluster software. 
The messaging layer can be either Corosync or
- Heartbeat and does not need to be the same one as before the upgrade.
-
+. On each node, shutdown the cluster software (pacemaker and the messaging
+ layer), and upgrade the Pacemaker software. This may also include upgrading
+ the messaging layer. While the underlying operating system may be upgraded
+ at the same time, that will be more likely to cause outages in the detached
+ services (certainly, if a reboot is required).
+. Check the configuration with the `crm_verify` tool.
+. On each node, start the cluster software. The messaging layer can be either
+ Corosync or Heartbeat and does not need to be the same one as before the
+ upgrade.
. Verify that the cluster re-detected all resources correctly.
. Allow the cluster to resume managing resources again:
+
----
-# crm_attribute -t crm_config -n is-managed-default -v true
+# crm_attribute --type rsc_defaults --name is-managed --update true
----
. For any resource that has a value for +is-managed+, reset it to
- +true+ (so the cluster can recover the service if it fails) if
- desired:
+ +true+ if desired, so that the cluster can recover the service if it fails:
+
----
# crm_resource -t primitive -r $rsc_id -p is-managed -v true
----
-[NOTE]
-The oldest version of the CRM to support this upgrade type was in Heartbeat 2.0.4.
-
[IMPORTANT]
===========
Always check your existing configuration is still compatible
with the version you are installing before starting the cluster.
===========
+
+=== Upgrading the Configuration ===
+
+indexterm:[upgrade,Configuration]
+indexterm:[Configuration,upgrading]
+
+Pacemaker's configuration -- the Configuration Information Base (CIB) -- has
+its own XML schema version, independent of the Pacemaker software version.
+
+After cluster software is upgraded, the cluster will continue to use
+the older schema version that it was previously using. This can be useful, for
+example, when administrators have written tools that modify the configuration,
+and are based on the older syntax.
+
+However, when using an older syntax, new features may be unavailable, and there
+is a performance impact, since the cluster must do a non-persistent
+configuration upgrade before each transition. So while using the old syntax is
+possible, it is not advisable to continue using it indefinitely.
+
+Even if you wish to continue using the old syntax, it is a good idea to
+follow the upgrade procedure outlined below, except for the last step, to ensure
+that the new software has no problems with your existing configuration (since it
+will perform much the same task internally).
+
+If you are brave, it is sufficient simply to run `cibadmin --upgrade`.
+
+A more cautious approach would proceed like this:
+
+. Create a shadow copy of the configuration. The later commands will automatically
+ operate on this copy, rather than the live configuration.
++
+-----
+# crm_shadow --create shadow
+-----
+. Verify the configuration is valid with the new software (which may be
+ stricter about syntax mistakes, or may have dropped support for deprecated
+ features):
+indexterm:[Configuration,verify]
+indexterm:[verify,Configuration]
++
+-----
+# crm_verify --live-check
+-----
+. Fix any errors or warnings.
+. Perform the upgrade:
++
+-----
+# cibadmin --upgrade
+-----
+. If this step fails, there are three main possibilities:
+.. The configuration was not valid to start with (did you do steps 2 and 3?).
+..
The transformation failed - http://bugs.clusterlabs.org/[report a bug] or + mailto:users@clusterlabs.org?subject=Transformation%20failed%20during%20upgrade[email the project]. +.. The transformation was successful but produced an invalid result. ++ +If the result of the transformation is invalid, you may see a number of errors +from the validation library. If these are not helpful, visit the +http://clusterlabs.org/wiki/Validation_FAQ[Validation FAQ wiki page] and/or try +the procedure described below under <>. ++ +. Check the changes: ++ +----- +# crm_shadow --diff +----- ++ +If at this point there is anything about the upgrade that you wish to fine-tune +(for example, to change some of the automatic IDs), now is the time to do so: ++ +----- +# crm_shadow --edit +----- ++ +This will open the configuration in your favorite editor (whichever is +specified by the standard *$EDITOR* environment variable). ++ +. Preview how the cluster will react: ++ +------ +# crm_simulate --live-check --save-dotfile shadow.dot -S +# graphviz shadow.dot +------ ++ +Verify that either no resource actions will occur or that you are +happy with any that are scheduled. If the output contains actions you +do not expect (possibly due to changes to the score calculations), you +may need to make further manual changes. See +<> for further details on how to interpret +the output of `crm_simulate` and `graphviz`. ++ +. Upload the changes: ++ +----- +# crm_shadow --commit shadow --force +----- ++ +In the unlikely event this step fails, please report a bug. + +[NOTE] +==== +[[s-upgrade-config-manual]] +indexterm:[Configuration,upgrade manually] +It is also possible to perform the configuration upgrade steps manually: + +. Locate the +upgrade*.xsl+ conversion scripts provided with the source code. These will often + be installed in a location such as +/usr/share/pacemaker+, or may be obtained from + the https://github.com/ClusterLabs/pacemaker/tree/master/xml[source repository]. + +. Run the conversion scripts that apply to your older version, for example: + indexterm:[XML,convert] ++ +----- +# xsltproc /path/to/upgrade06.xsl config06.xml > config10.xml +----- ++ +. Locate the +pacemaker.rng+ script (from the same location as the xsl files). +. Check the XML validity: indexterm:[validate configuration]indexterm:[Configuration,validate XML] ++ +---- +# xmllint --relaxng /path/to/pacemaker.rng config10.xml +---- + +The advantage of this method is that it can be performed without the +cluster running, and any validation errors are often more informative. +==== + +=== What Changed in 1.0 === + +==== New ==== + +* Failure timeouts. See <> +* New section for resource and operation defaults. See <> and <> +* Tool for making offline configuration changes. See <> +* +Rules, instance_attributes, meta_attributes+ and sets of operations can be defined once and referenced in multiple places. See <> +* The CIB now accepts XPath-based create/modify/delete operations. See the pass:[cibadmin] help text. +* Multi-dimensional colocation and ordering constraints. See <> and <> +* The ability to connect to the CIB from non-cluster machines. See <> +* Allow recurring actions to be triggered at known times. 
See <> + + +==== Changed ==== + +* Syntax +** All resource and cluster options now use dashes (-) instead of underscores (_) +** +master_slave+ was renamed to +master+ +** The +attributes+ container tag was removed +** The operation field +pre-req+ has been renamed +requires+ +** All operations must have an +interval+, +start+/+stop+ must have it set to zero +* The +stonith-enabled+ option now defaults to true. +* The cluster will refuse to start resources if +stonith-enabled+ is true (or unset) and no STONITH resources have been defined +* The attributes of colocation and ordering constraints were renamed for clarity. See <> and <> +* +resource-failure-stickiness+ has been replaced by +migration-threshold+. See <> +* The parameters for command-line tools have been made consistent +* Switched to 'RelaxNG' schema validation and 'libxml2' parser +** id fields are now XML IDs which have the following limitations: +*** id's cannot contain colons (:) +*** id's cannot begin with a number +*** id's must be globally unique (not just unique for that tag) +** Some fields (such as those in constraints that refer to resources) are IDREFs. ++ +This means that they must reference existing resources or objects in +order for the configuration to be valid. Removing an object which is +referenced elsewhere will therefore fail. ++ +** The CIB representation, from which a MD5 digest is calculated to verify CIBs on the nodes, has changed. ++ +This means that every CIB update will require a full refresh on any +upgraded nodes until the cluster is fully upgraded to 1.0. This will +result in significant performance degradation and it is therefore +highly inadvisable to run a mixed 1.0/0.6 cluster for any longer than +absolutely necessary. ++ +* Ping node information no longer needs to be added to _ha.cf_. ++ +Simply include the lists of hosts in your ping resource(s). + + +==== Removed ==== + + +* Syntax +** It is no longer possible to set resource meta options as top-level + attributes. Use meta attributes instead. +** Resource and operation defaults are no longer read from + +crm_config+. See <> and + <> instead. diff --git a/doc/Pacemaker_Explained/en-US/Book_Info.xml b/doc/Pacemaker_Explained/en-US/Book_Info.xml index c189d07a6c..de4ddbebe4 100644 --- a/doc/Pacemaker_Explained/en-US/Book_Info.xml +++ b/doc/Pacemaker_Explained/en-US/Book_Info.xml @@ -1,35 +1,35 @@ Configuration Explained An A-Z guide to Pacemaker's Configuration Options Pacemaker 1.1 7 - 0 + 1 The purpose of this document is to definitively explain the concepts used to configure Pacemaker. To achieve this, it will focus exclusively on the XML syntax used to configure Pacemaker's Cluster Information Base (CIB). diff --git a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt index 945a53c2b1..9527b1ab18 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Options.txt @@ -1,716 +1,821 @@ = Advanced Configuration = [[s-remote-connection]] == Connecting from a Remote Machine == indexterm:[Cluster,Remote connection] indexterm:[Cluster,Remote administration] Provided Pacemaker is installed on a machine, it is possible to connect to the cluster even if the machine itself is not in the same cluster. To do this, one simply sets up a number of environment variables and runs the same commands as when working on a cluster node. 
.Environment Variables Used to Connect to Remote Instances of the CIB [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Environment Variable |Default |Description |CIB_user |$USER |The user to connect as. Needs to be part of the +hacluster+ group on the target host. indexterm:[Environment Variable,CIB_user] |CIB_passwd | |The user's password. Read from the command line if unset. indexterm:[Environment Variable,CIB_passwd] |CIB_server |localhost |The host to contact indexterm:[Environment Variable,CIB_server] |CIB_port | |The port on which to contact the server; required. indexterm:[Environment Variable,CIB_port] |CIB_encrypted |TRUE |Whether to encrypt network traffic indexterm:[Environment Variable,CIB_encrypted] |========================================================= So, if *c001n01* is an active cluster node and is listening on port 1234 for connections, and *someuser* is a member of the *hacluster* group, then the following would prompt for *someuser*'s password and return the cluster's current configuration: ---- # export CIB_port=1234; export CIB_server=c001n01; export CIB_user=someuser; # cibadmin -Q ---- For security reasons, the cluster does not listen for remote connections by default. If you wish to allow remote access, you need to set the +remote-tls-port+ (encrypted) or +remote-clear-port+ (unencrypted) CIB properties (i.e., those kept in the +cib+ tag, like +num_updates+ and +epoch+). .Extra top-level CIB properties for remote access [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |remote-tls-port |_none_ |Listen for encrypted remote connections on this port. indexterm:[remote-tls-port,Remote Connection Option] indexterm:[Remote Connection,Option,remote-tls-port] |remote-clear-port |_none_ |Listen for plaintext remote connections on this port. indexterm:[remote-clear-port,Remote Connection Option] indexterm:[Remote Connection,Option,remote-clear-port] |========================================================= [[s-recurring-start]] == Specifying When Recurring Actions are Performed == By default, recurring actions are scheduled relative to when the resource started. So if your resource was last started at 14:32 and you have a backup set to be performed every 24 hours, then the backup will always run in the middle of the business day -- hardly desirable. To specify a date and time that the operation should be relative to, set the operation's +interval-origin+. The cluster uses this point to calculate the correct +start-delay+ such that the operation will occur at _origin + (interval * N)_. So, if the operation's interval is 24h, its interval-origin is set to 02:00 and it is currently 14:32, then the cluster would initiate the operation with a start delay of 11 hours and 28 minutes. If the resource is moved to another node before 2am, then the operation is cancelled. The value specified for +interval+ and +interval-origin+ can be any date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601 standard]. 
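To make the syntax concrete, here is a sketch of an operation definition
using +interval-origin+ (the +id+ and +name+ values are hypothetical, and the
origin uses the ISO8601 week-date form for a Monday):

[source,XML]
----
<op id="my-weekly-action" name="custom-action"
    interval="P7D" interval-origin="2009-W01-1"/>
----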
By way of example, to specify an operation that would run on the first Monday of 2009 and every Monday after that, you would add: .Specifying a Base for Recurring Action Intervals ===== [source,XML] ===== == Moving Resources == indexterm:[Moving,Resources] indexterm:[Resource,Moving] === Moving Resources Manually === There are primarily two occasions when you would want to move a resource from its current location: when the whole node is under maintenance, and when a single resource needs to be moved. ==== Standby Mode ==== Since everything eventually comes down to a score, you could create constraints for every resource to prevent them from running on one node. While pacemaker configuration can seem convoluted at times, not even we would require this of administrators. Instead, one can set a special node attribute which tells the cluster "don't let anything run here". There is even a helpful tool to help query and set it, called `crm_standby`. To check the standby status of the current machine, run: ---- # crm_standby -G ---- A value of +on+ indicates that the node is _not_ able to host any resources, while a value of +off+ says that it _can_. You can also check the status of other nodes in the cluster by specifying the `--node` option: ---- # crm_standby -G --node sles-2 ---- To change the current node's standby status, use `-v` instead of `-G`: ---- # crm_standby -v on ---- Again, you can change another host's value by supplying a hostname with `--node`. ==== Moving One Resource ==== When only one resource is required to move, we could do this by creating location constraints. However, once again we provide a user-friendly shortcut as part of the `crm_resource` command, which creates and modifies the extra constraints for you. If +Email+ were running on +sles-1+ and you wanted it moved to a specific location, the command would look something like: ---- # crm_resource -M -r Email -H sles-2 ---- Behind the scenes, the tool will create the following location constraint: [source,XML] It is important to note that subsequent invocations of `crm_resource -M` are not cumulative. So, if you ran these commands ---- # crm_resource -M -r Email -H sles-2 # crm_resource -M -r Email -H sles-3 ---- then it is as if you had never performed the first command. To allow the resource to move back again, use: ---- # crm_resource -U -r Email ---- Note the use of the word _allow_. The resource can move back to its original location but, depending on +resource-stickiness+, it might stay where it is. To be absolutely certain that it moves back to +sles-1+, move it there before issuing the call to `crm_resource -U`: ---- # crm_resource -M -r Email -H sles-1 # crm_resource -U -r Email ---- Alternatively, if you only care that the resource should be moved from its current location, try: ---- # crm_resource -B -r Email ---- Which will instead create a negative constraint, like [source,XML] This will achieve the desired effect, but will also have long-term consequences. As the tool will warn you, the creation of a +-INFINITY+ constraint will prevent the resource from running on that node until `crm_resource -U` is used. This includes the situation where every other cluster node is no longer available! In some cases, such as when +resource-stickiness+ is set to +INFINITY+, it is possible that you will end up with the problem described in <>. The tool can detect some of these cases and deals with them by creating both positive and negative constraints. E.g. 
+Email+ prefers +sles-1+ with a score of +-INFINITY+
+Email+ prefers +sles-2+ with a score of +INFINITY+

which has the same long-term consequences as discussed earlier.

[[s-failure-migration]]
=== Moving Resources Due to Failure ===

Normally, if a running resource fails, pacemaker will try to start
it again on the same node. However, if a resource fails repeatedly,
it is possible that there is an underlying problem on that node, and you
might want to try a different node in such a case.

indexterm:[migration-threshold]
indexterm:[failure-timeout]
indexterm:[start-failure-is-fatal]

Pacemaker allows you to set your preference via the +migration-threshold+
resource option.
footnote:[
The naming of this option was perhaps unfortunate as it is easily
confused with live migration, the process of moving a resource from
one node to another without stopping it. Xen virtual guests are the
most common example of resources that can be migrated in this manner.
]

Simply define +migration-threshold=pass:[N]+ for a resource and it will
migrate to a new node after 'N' failures. There is no threshold defined
by default. To determine the resource's current failure status and limits,
run `crm_mon --failcounts`.

By default, once the threshold has been reached, the troublesome node will
no longer be allowed to run the failed resource until the administrator
manually resets the resource's failcount using `crm_failcount` (after
hopefully first fixing the failure's cause). Alternatively, it is possible
to expire the failcounts by setting the +failure-timeout+ option for the
resource.

For example, a setting of +migration-threshold=2+ and +failure-timeout=60s+
would cause the resource to move to a new node after 2 failures, and
allow it to move back (depending on stickiness and constraint scores) after
one minute.

There are two exceptions to the migration threshold concept:
when a resource either fails to start or fails to stop.

If the cluster property +start-failure-is-fatal+ is set to +true+ (which is
the default), start failures cause the failcount to be set to +INFINITY+ and
thus always cause the resource to move immediately.

Stop failures are slightly different and crucial. If a resource fails
to stop and STONITH is enabled, then the cluster will fence the node
in order to be able to start the resource elsewhere. If STONITH is
not enabled, then the cluster has no way to continue and will not try
to start the resource elsewhere, but will try to stop it again after
the failure timeout.

[IMPORTANT]
Please read <> to understand how timeouts work
before configuring a +failure-timeout+.

=== Moving Resources Due to Connectivity Changes ===

You can configure the cluster to move resources when external connectivity
is lost in two steps.

==== Tell Pacemaker to Monitor Connectivity ====

First, add an *ocf:pacemaker:ping* resource to the cluster. The
*ping* resource uses the system utility of the same name to test whether
the machines in a given list (specified by DNS hostname or IPv4/IPv6 address)
are reachable, and uses the results to maintain a node attribute called
+pingd+ by default.
footnote:[
The attribute name is customizable, in order to allow multiple ping groups
to be defined.
]

[NOTE]
===========
Older versions of Heartbeat required users to add ping nodes to +ha.cf+, but
this is no longer required.

Older versions of Pacemaker used a different agent *ocf:pacemaker:pingd*
which is now deprecated in favor of *ping*.
If your version of Pacemaker does not contain the *ping* resource agent, download the latest version from https://github.com/ClusterLabs/pacemaker/tree/master/extra/resources/ping =========== Normally, the ping resource should run on all cluster nodes, which means that you'll need to create a clone. A template for this can be found below along with a description of the most interesting parameters. .Common Options for a 'ping' Resource [width="95%",cols="1m,4<",options="header",align="center"] |========================================================= |Field |Description |dampen |The time to wait (dampening) for further changes to occur. Use this to prevent a resource from bouncing around the cluster when cluster nodes notice the loss of connectivity at slightly different times. indexterm:[dampen,Ping Resource Option] indexterm:[Ping Resource,Option,dampen] |multiplier |The number of connected ping nodes gets multiplied by this value to get a score. Useful when there are multiple ping nodes configured. indexterm:[multiplier,Ping Resource Option] indexterm:[Ping Resource,Option,multiplier] |host_list |The machines to contact in order to determine the current connectivity status. Allowed values include resolvable DNS host names, IPv4 and IPv6 addresses. indexterm:[host_list,Ping Resource Option] indexterm:[Ping Resource,Option,host_list] |========================================================= .An example ping cluster resource that checks node connectivity once every minute ===== [source,XML] ------------ ------------ ===== [IMPORTANT] =========== You're only half done. The next section deals with telling Pacemaker how to deal with the connectivity status that +ocf:pacemaker:ping+ is recording. =========== ==== Tell Pacemaker How to Interpret the Connectivity Data ==== [IMPORTANT] ====== Before attempting the following, make sure you understand <>. ====== There are a number of ways to use the connectivity data. The most common setup is for people to have a single ping target (e.g. the service network's default gateway), to prevent the cluster from running a resource on any unconnected node. .Don't run a resource on unconnected nodes ===== [source,XML] ------- ------- ===== A more complex setup is to have a number of ping targets configured. You can require the cluster to only run resources on nodes that can connect to all (or a minimum subset) of them. .Run only on nodes connected to three or more ping targets. ===== [source,XML] ------- ... ... ... ------- ===== Alternatively, you can tell the cluster only to _prefer_ nodes with the best connectivity. Just be sure to set +multiplier+ to a value higher than that of +resource-stickiness+ (and don't set either of them to +INFINITY+). .Prefer the node with the most connected ping nodes ===== [source,XML] ------- ------- ===== It is perhaps easier to think of this in terms of the simple constraints that the cluster translates it into. For example, if *sles-1* is connected to all five ping nodes but *sles-2* is only connected to two, then it would be as if you instead had the following constraints in your configuration: .How the cluster translates the above location constraint ===== [source,XML] ------- ------- ===== The advantage is that you don't have to manually update any constraints whenever your network connectivity changes. You can also combine the concepts above into something even more complex. 
The example below shows how you can prefer the node with the most connected
ping nodes provided they have connectivity to at least three (again assuming
that +multiplier+ is set to 1000).

.A more complex example of choosing a location based on connectivity
=====
[source,XML]
-------
-------
=====

[[s-migrating-resources]]
=== Migrating Resources ===

Normally, when the cluster needs to move a resource, it fully restarts the
resource (i.e. stops the resource on the current node and starts it on the
new node).

However, some types of resources, such as Xen virtual guests, are able to
move to another location without loss of state (often referred to as live
migration or hot migration). In pacemaker, this is called resource migration.
Pacemaker can be configured to migrate a resource when moving it, rather
than restarting it.

Not all resources are able to migrate (see the Migration Checklist below),
and those that can won't do so in all situations. Conceptually, there are
two requirements from which the other prerequisites follow:

* The resource must be active and healthy at the old location; and
* everything required for the resource to run must be available on
  both the old and new locations.

The cluster is able to accommodate both 'push' and 'pull' migration models
by requiring the resource agent to support two special actions:
+migrate_to+ (performed on the current location) and +migrate_from+
(performed on the destination).

In push migration, the process on the current location transfers the
resource to the new location where it is later activated. In this scenario,
most of the work would be done in the +migrate_to+ action and, if anything,
the activation would occur during +migrate_from+.

Conversely for pull, the +migrate_to+ action is practically empty and
+migrate_from+ does most of the work, extracting the relevant resource
state from the old location and activating it.

There is no wrong or right way for a resource agent to implement migration,
as long as it works.

.Migration Checklist

* The resource may not be a clone.
* The resource must use an OCF style agent.
* The resource must not be in a failed or degraded state.
* The resource agent must support +migrate_to+ and +migrate_from+ actions,
  and advertise them in its metadata.
* The resource must have the +allow-migrate+ meta-attribute set to +true+
  (which is not the default).

If an otherwise migratable resource depends on another resource
via an ordering constraint, there are special situations in which it will
be restarted rather than migrated.

For example, if the resource depends on a clone, and at the time the
resource needs to be moved, the clone has instances that are stopping and
instances that are starting, then the resource will be restarted. The Policy
Engine is not yet able to model this situation correctly and so takes the
safer (if less optimal) path.

In pacemaker 1.1.11 and earlier, a migratable resource will be restarted
when moving if it directly or indirectly depends on 'any' primitive or group
resources.

Even in newer versions, if a migratable resource depends on a non-migratable
resource, and both need to be moved, the migratable resource will be
restarted.

+[[s-node-health]]
+== Tracking Node Health ==
+
+A node may be functioning adequately as far as cluster membership is concerned,
+and yet be "unhealthy" in some respect that makes it an undesirable location
+for resources. For example, a disk drive may be reporting SMART errors, or the
+CPU may be highly loaded.
+ +Pacemaker offers a way to automatically move resources off unhealthy nodes. + +=== Node Health Attributes === + +Pacemaker will treat any node attribute whose name starts with +#health+ as an +indicator of node health. Node health attributes may have one of the following +values: + +.Allowed Values for Node Health Attributes +[width="95%",cols="1,3<",options="header",align="center"] +|========================================================= + +|Value +|Intended significance + +|+red+ +|This indicator is unhealthy + indexterm:[Node health,red] + +|+yellow+ +|This indicator is becoming unhealthy + indexterm:[Node health,yellow] + +|+green+ +|This indicator is healthy + indexterm:[Node health,green] + +|'integer' +|A numeric score to apply to all resources on this node + (0 or positive is healthy, negative is unhealthy) + indexterm:[Node health,score] + +|========================================================= + +=== Node Health Strategy === + +Pacemaker assigns a node health score to each node, as the sum of the values of +all its node health attributes. This score will be used as a location +constraint applied to this node for all resources. + +The +node-health-strategy+ cluster option controls how Pacemaker responds to +changes in node health attributes, and how it translates +red+, +yellow+, and ++green+ to scores. + +Allowed values are: + +.Node Health Strategies +[width="95%",cols="1m,3<",options="header",align="center"] +|========================================================= + +|Value +|Effect + +|none +|Do not track node health attributes at all. + indexterm:[Node health,none] + +|migrate-on-red +|Assign the value of +-INFINITY+ to +red+, and 0 to +yellow+ and +green+. + This will cause all resources to move off the node if any attribute is +red+. + indexterm:[Node health,migrate-on-red] + +|only-green +|Assign the value of +-INFINITY+ to +red+ and +yellow+, and 0 to +green+. + This will cause all resources to move off the node if any attribute is +red+ + or +yellow+. + indexterm:[Node health,only-green] + +|progressive +|Assign the value of the +node-health-red+ cluster option to +red+, the value + of +node-health-yellow+ to +yellow+, and the value of +node-health-green+ to + +green+. This strategy gives the administrator finer control over how + important each value is. + indexterm:[Node health,progressive] + +|custom +|Track node health attributes using the same values as +progressive+ for + +red+, +yellow+, and +green+, but do not take them into account. + The administrator is expected to implement a policy by defining rules + (see <>) referencing node health attributes. + indexterm:[Node health,custom] + +|========================================================= + +=== Measuring Node Health === + +Since Pacemaker calculates node health based on node attributes, +any method that sets node attributes may be used to measure node +health. The most common ways are resource agents or separate daemons. + +Pacemaker provides examples that can be used directly or as a basis for +custom code. The +ocf:pacemaker:HealthCPU+ and +ocf:pacemaker:HealthSMART+ +resource agents set node health attributes based on CPU and disk parameters. +The +ipmiservicelogd+ daemon sets node health attributes based on IPMI +values (the +ocf:pacemaker:SystemHealth+ resource agent can be used to manage +the daemon as a cluster resource). 
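For example, a custom monitoring script could mark the local node as unhealthy and then healthy again with `attrd_updater`; this is only a sketch, and the attribute name +#health-smart+ is chosen for illustration (any name starting with +#health+ works):
----
# attrd_updater --name "#health-smart" --update red
# attrd_updater --name "#health-smart" --update green
----
With +node-health-strategy+ set to +migrate-on-red+, the first command would cause all resources to move off the local node, and the second would allow them back.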
+ [[s-reusing-config-elements]] == Reusing Rules, Options and Sets of Operations == Sometimes a number of constraints need to use the same set of rules, and resources need to set the same options and parameters. To simplify this situation, you can refer to an existing object using an +id-ref+ instead of an id. So if for one resource you have [source,XML] ------ ------ Then, instead of duplicating the rule for all your other resources, you can simply specify: .Referencing rules from other constraints ===== [source,XML] ------- ------- ===== [IMPORTANT] =========== The cluster will insist that the +rule+ exists somewhere. Attempting to add a reference to a non-existing rule will cause a validation failure, as will attempting to remove a +rule+ that is referenced elsewhere. =========== The same principle applies for +meta_attributes+ and +instance_attributes+ as illustrated in the example below: .Referencing attributes, options, and operations from other resources ===== [source,XML] ------- ------- ===== == Reloading Services After a Definition Change == The cluster automatically detects changes to the definition of services it manages. The normal response is to stop the service (using the old definition) and start it again (with the new definition). This works well, but some services are smarter and can be told to use a new set of options without restarting. To take advantage of this capability, the resource agent must: . Accept the +reload+ operation and perform any required actions. _The actions here depend completely on your application!_ + .The DRBD agent's logic for supporting +reload+ ===== [source,Bash] ------- case $1 in start) drbd_start ;; stop) drbd_stop ;; reload) drbd_reload ;; monitor) drbd_monitor ;; *) drbd_usage exit $OCF_ERR_UNIMPLEMENTED ;; esac exit $? ------- ===== . Advertise the +reload+ operation in the +actions+ section of its metadata + .The DRBD Agent Advertising Support for the +reload+ Operation ===== [source,XML] ------- 1.1 Master/Slave OCF Resource Agent for DRBD ... ------- ===== . Advertise one or more parameters that can take effect using +reload+. + Any parameter with +unique+ set to +0+ is eligible to be used in this way. + .Parameter that can be changed using reload ===== [source,XML] ------- Full path to the drbd.conf file. Path to drbd.conf ------- ===== Once these requirements are satisfied, the cluster will automatically know to reload the resource (instead of restarting) when a non-unique field changes. [NOTE] ====== Metadata will not be re-read unless the resource needs to be started. This may mean that the resource will be restarted the first time, even though you changed a parameter with +unique=0+. ====== [NOTE] ====== If both a unique and non-unique field are changed simultaneously, the resource will still be restarted. ====== diff --git a/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt b/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt index f548dc9093..2f5bec7b11 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Constraints.txt @@ -1,836 +1,836 @@ = Resource Constraints = indexterm:[Resource,Constraints] == Scores == Scores of all kinds are integral to how the cluster works. Practically everything from moving a resource to deciding which resource to stop in a degraded cluster is achieved by manipulating scores in some way. Scores are calculated per resource and node. Any node with a negative score for a resource can't run that resource.
The cluster places a resource on the node with the highest score for it. === Infinity Math === Pacemaker implements +INFINITY+ (or equivalently, ++INFINITY+) internally as a score of 1,000,000. Addition and subtraction with it follow these three basic rules: * Any value + +INFINITY+ = +INFINITY+ * Any value - +INFINITY+ = +-INFINITY+ * +INFINITY+ - +INFINITY+ = +-INFINITY+ [NOTE] ====== What if you want to use a score higher than 1,000,000? Typically this possibility arises when someone wants to base the score on some external metric that might go above 1,000,000. The short answer is you can't. The long answer is that it is sometimes possible to work around this limitation creatively. You may be able to set the score to some computed value based on the external metric rather than use the metric directly. For nodes, you can store the metric as a node attribute, and query the attribute when computing the score (possibly as part of a custom resource agent). ====== == Deciding Which Nodes a Resource Can Run On == indexterm:[Location Constraints] indexterm:[Resource,Constraints,Location] 'Location constraints' tell the cluster which nodes a resource can run on. There are two alternative strategies. One way is to say that, by default, resources can run anywhere, and then the location constraints specify nodes that are not allowed (an 'opt-out' cluster). The other way is to start with nothing able to run anywhere, and use location constraints to selectively enable allowed nodes (an 'opt-in' cluster). Whether you should choose opt-in or opt-out depends on your personal preference and the make-up of your cluster. If most of your resources can run on most of the nodes, then an opt-out arrangement is likely to result in a simpler configuration. On the other hand, if most resources can only run on a small subset of nodes, an opt-in configuration might be simpler. === Location Properties === .Properties of a rsc_location Constraint [width="95%",cols="2m,1,5 ------- ====== === Symmetrical "Opt-Out" Clusters === indexterm:[Symmetrical Opt-Out Clusters] indexterm:[Cluster Type,Symmetrical Opt-Out] To create an opt-out cluster, start by allowing resources to run anywhere by default: ---- # crm_attribute --name symmetric-cluster --update true ---- Then start disabling nodes. The following fragment is the equivalent of the above opt-in configuration. .Opt-out location constraints for two resources ====== [source,XML] ------- ------- ====== [[node-score-equal]] === What if Two Nodes Have the Same Score === If two nodes have the same score, then the cluster will choose one. This choice may seem random and may not be what was intended; however, the cluster was not given enough information to know any better. .Constraints where a resource prefers two nodes equally ====== [source,XML] ------- ------- ====== In the example above, assuming no other constraints and an inactive cluster, +Webserver+ would probably be placed on +sles-1+ and +Database+ on +sles-2+. It would likely have placed +Webserver+ based on the node's uname and +Database+ based on the desire to spread the resource load evenly across the cluster. However, other factors can also be involved in more complex configurations. [[s-resource-ordering]] == Specifying the Order in which Resources Should Start/Stop == indexterm:[Resource,Constraints,Ordering] indexterm:[Resource,Start Order] indexterm:[Ordering Constraints] 'Ordering constraints' tell the cluster the order in which resources should start.
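For example, a minimal constraint that starts +Database+ before +Webserver+ might look like the following sketch (the +id+ and resource names are illustrative):
=====
[source,XML]
-------
<rsc_order id="order-db-before-web" first="Database" then="Webserver"/>
-------
=====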
[IMPORTANT] ==== Ordering constraints affect 'only' the ordering of resources; they do 'not' require that the resources be placed on the same node. If you want resources to be started on the same node 'and' in a specific order, you need both an ordering constraint 'and' a colocation constraint (see <>), or alternatively, a group (see <>). ==== === Ordering Properties === .Properties of a rsc_order Constraint [width="95%",cols="1m,1,4> resources. === Optional and mandatory ordering === Here is an example of ordering constraints where +Database+ 'must' start before +Webserver+, and +IP+ 'should' start before +Webserver+ if they both need to be started: .Optional and mandatory ordering constraints ====== [source,XML] ------- ------- ====== Because the above example lets +symmetrical+ default to TRUE, +Webserver+ must be stopped before +Database+ can be stopped, and +Webserver+ should be stopped before +IP+ if they both need to be stopped. [[s-resource-colocation]] == Placing Resources Relative to other Resources == indexterm:[Resource,Constraints,Colocation] indexterm:[Resource,Location Relative to other Resources] 'Colocation constraints' tell the cluster that the location of one resource depends on the location of another one. Colocation has an important side-effect: it affects the order in which resources are assigned to a node. Think about it: You can't place A relative to B unless you know where B is. footnote:[ While the human brain is sophisticated enough to read the constraint in any order and choose the correct one depending on the situation, the cluster is not quite so smart. Yet. ] So when you are creating colocation constraints, it is important to consider whether you should colocate A with B, or B with A. Another thing to keep in mind is that, assuming A is colocated with B, the cluster will take into account A's preferences when deciding which node to choose for B. For a detailed look at exactly how this occurs, see http://clusterlabs.org/doc/Colocation_Explained.pdf[Colocation Explained]. [IMPORTANT] ==== Colocation constraints affect 'only' the placement of resources; they do 'not' require that the resources be started in a particular order. If you want resources to be started on the same node 'and' in a specific order, you need both an ordering constraint (see <>) 'and' a colocation constraint, or alternatively, a group (see <>). ==== === Colocation Properties === .Properties of a rsc_colocation Constraint [width="95%",cols="2m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the constraint. indexterm:[id,Colocation Constraints] indexterm:[Constraints,Colocation,id] |rsc |The name of a resource that should be located relative to +with-rsc+. indexterm:[rsc,Colocation Constraints] indexterm:[Constraints,Colocation,rsc] |with-rsc |The name of the resource used as the colocation target. The cluster will decide where to put this resource first and then decide where to put +rsc+. indexterm:[with-rsc,Colocation Constraints] indexterm:[Constraints,Colocation,with-rsc] |score |Positive values indicate the resources should run on the same node. Negative values indicate the resources should run on different nodes. Values of \+/- +INFINITY+ change "should" to "must". 
indexterm:[score,Colocation Constraints] indexterm:[Constraints,Colocation,score] |========================================================= === Mandatory Placement === Mandatory placement occurs when the constraint's score is ++INFINITY+ or +-INFINITY+. In such cases, if the constraint can't be satisfied, then the +rsc+ resource is not permitted to run. For +score=INFINITY+, this includes cases where the +with-rsc+ resource is not active. If you need resource +A+ to always run on the same machine as resource +B+, you would add the following constraint: .Mandatory colocation constraint for two resources ==== [source,XML] ==== Remember, because +INFINITY+ was used, if +B+ can't run on any of the cluster nodes (for whatever reason) then +A+ will not be allowed to run. Whether +A+ is running or not has no effect on +B+. Alternatively, you may want the opposite -- that +A+ 'cannot' run on the same machine as +B+. In this case, use +score="-INFINITY"+. .Mandatory anti-colocation constraint for two resources ==== [source,XML] ==== Again, by specifying +-INFINITY+, the constraint is binding. So if the only place left to run is where +B+ already is, then +A+ may not run anywhere. As with +INFINITY+, +B+ can run even if +A+ is stopped. However, in this case +A+ also can run if +B+ is stopped, because it still meets the constraint of +A+ and +B+ not running on the same node. === Advisory Placement === If mandatory placement is about "must" and "must not", then advisory placement is the "I'd prefer if" alternative. For constraints with scores greater than +-INFINITY+ and less than +INFINITY+, the cluster will try to accommodate your wishes but may ignore them if the alternative is to stop some of the cluster resources. As in life, where if enough people prefer something it effectively becomes mandatory, advisory colocation constraints can combine with other elements of the configuration to behave as if they were mandatory. .Advisory colocation constraint for two resources ==== [source,XML] ==== [[s-resource-sets]] == Resource Sets == 'Resource sets' allow multiple resources to be affected by a single constraint. .A set of 3 resources ==== [source,XML] ---- ---- ==== Resource sets are valid inside +rsc_location+, +rsc_order+ (see <>), +rsc_colocation+ (see <>), and +rsc_ticket+ (see <>) constraints. A resource set has a number of properties that can be set, though not all have an effect in all contexts. .Properties of a resource_set [width="95%",cols="2m,1,5 ------- ====== .Visual representation of the four resources' start order for the above constraints image::images/resource-set.png["Ordered set",width="16cm",height="2.5cm",align="center"] === Ordered Set === To simplify this situation, resource sets (see <>) can be used within ordering constraints: .A chain of ordered resources expressed as a set ====== [source,XML] ------- ------- ====== While the set-based format is not less verbose, it is significantly easier to get right and maintain. [IMPORTANT] ========= If you use a higher-level tool, pay attention to how it exposes this functionality. Depending on the tool, creating a set +A B+ may be equivalent to +A then B+, or +B then A+. ========= === Ordering Multiple Sets === The syntax can be expanded to allow sets of resources to be ordered relative to each other, where the members of each individual set may be ordered or unordered (controlled by the +sequential+ property). 
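As a sketch (constraint ID and resource names are illustrative), two such sets, each internally unordered, might be written:
=====
[source,XML]
-------
<rsc_order id="order-two-sets">
  <!-- Members of each set start in parallel (sequential=false) -->
  <resource_set id="ordered-set-1" sequential="false">
    <resource_ref id="A"/>
    <resource_ref id="B"/>
  </resource_set>
  <!-- This set starts only after all members of the first set -->
  <resource_set id="ordered-set-2" sequential="false">
    <resource_ref id="C"/>
    <resource_ref id="D"/>
  </resource_set>
</rsc_order>
-------
=====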
In this example, +A+ and +B+ can both start in parallel, as can +C+ and +D+; however, +C+ and +D+ can only start once _both_ +A+ _and_ +B+ are active. .Ordered sets of unordered resources ====== [source,XML] ------- ------- ====== .Visual representation of the start order for two ordered sets of unordered resources image::images/two-sets.png["Two ordered sets",width="13cm",height="7.5cm",align="center"] Of course either set -- or both sets -- of resources can also be internally ordered (by setting +sequential="true"+), and there is no limit to the number of sets that can be specified. .Advanced use of set ordering - Three ordered sets, two of which are internally unordered ====== [source,XML] ------- ------- ====== .Visual representation of the start order for the three sets defined above image::images/three-sets.png["Three ordered sets",width="16cm",height="7.5cm",align="center"] [IMPORTANT] ==== An ordered set with +sequential=false+ makes sense only if there is another set in the constraint. Otherwise, the constraint has no effect. ==== === Resource Set OR Logic === The unordered set logic discussed so far has all been "AND" logic. To illustrate this, take the three-set figure in the previous section. Those sets can be expressed as +(A and B) then \(C) then (D) then (E and F)+. Say, for example, we want to change the first set, +(A and B)+, to use "OR" logic so the sets look like this: +(A or B) then \(C) then (D) then (E and F)+. This functionality can be achieved through the use of the +require-all+ option. This option defaults to TRUE, which is why the "AND" logic is used by default. Setting +require-all=false+ means only one resource in the set needs to be started before continuing on to the next set. .Resource Set "OR" logic: Three ordered sets, where the first set is internally unordered with "OR" logic ====== [source,XML] ------- ------- ====== [IMPORTANT] ==== An ordered set with +require-all=false+ makes sense only in conjunction with +sequential=false+. Think of it like this: +sequential=false+ modifies the set to be an unordered set using "AND" logic by default, and adding +require-all=false+ flips the unordered set's "AND" logic to "OR" logic. ==== [[s-resource-sets-colocation]] == Colocating Sets of Resources == Another common situation is for an administrator to create a set of colocated resources. One way to do this would be to define a resource group (see <>), but that cannot always accurately express the desired state. Another way would be to define each relationship as an individual constraint, but that causes a constraint explosion as the number of resources and combinations grows. An example of this approach: .Chain of colocated resources ====== [source,XML] ------- ------- ====== To make things easier, resource sets (see <>) can be used within colocation constraints. As with the chained version, a resource that can't be active prevents any resource that must be colocated with it from being active. For example, if +B+ is not able to run, then both +C+ and by inference +D+ must also remain stopped. Here is an example +resource_set+: .Equivalent colocation chain expressed using +resource_set+ ====== [source,XML] ------- ------- ====== [IMPORTANT] ========= If you use a higher-level tool, pay attention to how it exposes this functionality. Depending on the tool, creating a set +A B+ may be equivalent to +A with B+, or +B with A+.
========= This notation can also be used to tell the cluster that sets of resources must be colocated relative to each other, where the individual members of each set may or may not depend on each other being active (controlled by the +sequential+ property). In this example, +A+, +B+, and +C+ will each be colocated with +D+. +D+ must be active, but any of +A+, +B+, or +C+ may be inactive without affecting any other resources. .Using colocated sets to specify a common peer ====== [source,XML] ------- ------- ====== [IMPORTANT] ==== A colocated set with +sequential=false+ makes sense only if there is another set in the constraint. Otherwise, the constraint has no effect. ==== There is no inherent limit to the number and size of the sets used. The only thing that matters is that in order for any member of one set in the constraint to be active, all members of sets listed after it must also be active (and naturally on the same node); and if a set has +sequential="true"+, then in order for one member of that set to be active, all members listed before it must also be active. If desired, you can restrict the dependency to instances of multistate resources that are in a specific role, using the set's +role+ property. .Colocation chain in which the members of the middle set have no interdependencies, and the last listed set (which the cluster places first) is restricted to instances in master status. ====== [source,XML] ------- ------- ====== .Visual representation of the above example (resources to the left are placed first) image::images/three-sets-complex.png["Colocation chain",width="16cm",height="9cm",align="center"] [NOTE] ==== Pay close attention to the order in which resources and sets are listed. While the colocation dependency for members of any one set is last-to-first, the colocation dependency for multiple sets is first-to-last. In the above example, +B+ is colocated with +A+, but +colocated-set-1+ is colocated with +colocated-set-2+. Unlike ordered sets, colocated sets do not use the +require-all+ option. ==== diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt index bf63da0ef1..f8a3daffc8 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt @@ -1,410 +1,435 @@ = Cluster-Wide Configuration = == CIB Properties == Certain settings are defined by CIB properties (that is, attributes of the +cib+ tag) rather than with the rest of the cluster configuration in the +configuration+ section. The reason is simply a matter of parsing. These options are used by the configuration database which is, by design, mostly ignorant of the content it holds. So the decision was made to place them in an easy-to-find location. .CIB Properties [width="95%",cols="2m,5<",options="header",align="center"] |========================================================= |Field |Description | admin_epoch | indexterm:[Configuration Version,Cluster] indexterm:[Cluster,Option,Configuration Version] indexterm:[admin_epoch,Cluster Option] indexterm:[Cluster,Option,admin_epoch] When a node joins the cluster, the cluster performs a check to see which node has the best configuration. It asks the node with the highest (+admin_epoch+, +epoch+, +num_updates+) tuple to replace the configuration on all the nodes -- which makes setting them, and setting them correctly, very important. +admin_epoch+ is never modified by the cluster; you can use this to make the configurations on any inactive nodes obsolete.
_Never set this value to zero_. In such cases, the cluster cannot tell the difference between your configuration and the "empty" one used when nothing is found on disk. | epoch | indexterm:[epoch,Cluster Option] indexterm:[Cluster,Option,epoch] The cluster increments this every time the configuration is updated (usually by the administrator). | num_updates | indexterm:[num_updates,Cluster Option] indexterm:[Cluster,Option,num_updates] The cluster increments this every time the configuration or status is updated (usually by the cluster) and resets it to 0 when epoch changes. | validate-with | indexterm:[validate-with,Cluster Option] indexterm:[Cluster,Option,validate-with] Determines the type of XML validation that will be done on the configuration. If set to +none+, the cluster will not verify that updates conform to the DTD (nor reject ones that don't). This option can be useful when operating a mixed-version cluster during an upgrade. |cib-last-written | indexterm:[cib-last-written,Cluster Property] indexterm:[Cluster,Property,cib-last-written] Indicates when the configuration was last written to disk. Maintained by the cluster; for informational purposes only. |have-quorum | indexterm:[have-quorum,Cluster Property] indexterm:[Cluster,Property,have-quorum] Indicates if the cluster has quorum. If false, this may mean that the cluster cannot start resources or fence other nodes (see +no-quorum-policy+ below). Maintained by the cluster. |dc-uuid | indexterm:[dc-uuid,Cluster Property] indexterm:[Cluster,Property,dc-uuid] Indicates which cluster node is the current leader. Used by the cluster when placing resources and determining the order of some events. Maintained by the cluster. |========================================================= === Working with CIB Properties === Although these fields can be written to by the user, in most cases the cluster will overwrite any values specified by the user with the "correct" ones. To change the ones that can be specified by the user, for example +admin_epoch+, one should use: ---- # cibadmin --modify --xml-text '' ---- A complete set of CIB properties will look something like this: .Attributes set for a cib object ====== [source,XML] ------- ------- ====== [[s-cluster-options]] == Cluster Options == Cluster options, as you might expect, control how the cluster behaves when confronted with certain situations. They are grouped into sets within the +crm_config+ section, and, in advanced configurations, there may be more than one set. (This will be described later in the section on <> where we will show how to have the cluster use different sets of options during working hours than during weekends.) For now, we will describe the simple case where each option is present at most once. You can obtain an up-to-date list of cluster options, including their default values, by running the `man pengine` and `man crmd` commands. .Cluster Options [width="95%",cols="5m,2,11>). | enable-startup-probes | TRUE | indexterm:[enable-startup-probes,Cluster Option] indexterm:[Cluster,Option,enable-startup-probes] Should the cluster check for active resources during startup? | maintenance-mode | FALSE | indexterm:[maintenance-mode,Cluster Option] indexterm:[Cluster,Option,maintenance-mode] Should the cluster refrain from monitoring, starting and stopping resources? | stonith-enabled | TRUE | indexterm:[stonith-enabled,Cluster Option] indexterm:[Cluster,Option,stonith-enabled] Should failed nodes and nodes with resources that can't be stopped be shot? 
If you value your data, set up a STONITH device and enable this. If true, or unset, the cluster will refuse to start resources unless one or more STONITH resources have been configured. If false, unresponsive nodes are immediately assumed to be running no resources, and resource takeover to online nodes starts without any further protection (which means _data loss_ if the unresponsive node still accesses shared storage, for example). See also the +requires+ meta-attribute in <>. | stonith-action | reboot | indexterm:[stonith-action,Cluster Option] indexterm:[Cluster,Option,stonith-action] Action to send to STONITH device. Allowed values are +reboot+ and +off+. The value +poweroff+ is also allowed, but is only used for legacy devices. | stonith-timeout | 60s | indexterm:[stonith-timeout,Cluster Option] indexterm:[Cluster,Option,stonith-timeout] How long to wait for STONITH actions (reboot, on, off) to complete | concurrent-fencing | FALSE | indexterm:[concurrent-fencing,Cluster Option] indexterm:[Cluster,Option,concurrent-fencing] Is the cluster allowed to initiate multiple fence actions concurrently? | cluster-delay | 60s | indexterm:[cluster-delay,Cluster Option] indexterm:[Cluster,Option,cluster-delay] Estimated maximum round-trip delay over the network (excluding action execution). If the TE requires an action to be executed on another node, it will consider the action failed if it does not get a response from the other node in this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. | dc-deadtime | 20s | indexterm:[dc-deadtime,Cluster Option] indexterm:[Cluster,Option,dc-deadtime] How long to wait for a response from other nodes during startup. The "correct" value will depend on the speed/load of your network and the type of switches used. | cluster-recheck-interval | 15min | indexterm:[cluster-recheck-interval,Cluster Option] indexterm:[Cluster,Option,cluster-recheck-interval] Polling interval for time-based changes to options, resource parameters and constraints. The Cluster is primarily event-driven, but your configuration can have elements that take effect based on the time of day. To ensure these changes take effect, we can optionally poll the cluster's status for changes. A value of 0 disables polling. Positive values are an interval (in seconds unless other SI units are specified, e.g. 5min). | pe-error-series-max | -1 | indexterm:[pe-error-series-max,Cluster Option] indexterm:[Cluster,Option,pe-error-series-max] The number of PE inputs resulting in ERRORs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-warn-series-max | -1 | indexterm:[pe-warn-series-max,Cluster Option] indexterm:[Cluster,Option,pe-warn-series-max] The number of PE inputs resulting in WARNINGs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-input-series-max | -1 | indexterm:[pe-input-series-max,Cluster Option] indexterm:[Cluster,Option,pe-input-series-max] The number of "normal" PE inputs to save. Used when reporting problems. A value of -1 means unlimited (report all). +| node-health-strategy | none | +indexterm:[node-health-strategy,Cluster Option] +indexterm:[Cluster,Option,node-health-strategy] + How the cluster should react to node health attributes (see <>). + Allowed values are +none+, +migrate-on-red+, +only-green+, +progressive+, and + +custom+. 
+ +| node-health-green | 0 | +indexterm:[node-health-green,Cluster Option] +indexterm:[Cluster,Option,node-health-green] + The score to use for a node health attribute whose value is +green+. + Only used when +node-health-strategy+ is +progressive+ or +custom+. + +| node-health-yellow | 0 | +indexterm:[node-health-yellow,Cluster Option] +indexterm:[Cluster,Option,node-health-yellow] + The score to use for a node health attribute whose value is +yellow+. + Only used when +node-health-strategy+ is +progressive+ or +custom+. + +| node-health-red | 0 | +indexterm:[node-health-red,Cluster Option] +indexterm:[Cluster,Option,node-health-red] + The score to use for a node health attribute whose value is +red+. + Only used when +node-health-strategy+ is +progressive+ or +custom+. + | remove-after-stop | FALSE | indexterm:[remove-after-stop,Cluster Option] indexterm:[Cluster,Option,remove-after-stop] _Advanced Use Only:_ Should the cluster remove resources from the LRM after they are stopped? Values other than the default are, at best, poorly tested and potentially dangerous. | startup-fencing | TRUE | indexterm:[startup-fencing,Cluster Option] indexterm:[Cluster,Option,startup-fencing] _Advanced Use Only:_ Should the cluster shoot unseen nodes? Not using the default is very unsafe! | election-timeout | 2min | indexterm:[election-timeout,Cluster Option] indexterm:[Cluster,Option,election-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | shutdown-escalation | 20min | indexterm:[shutdown-escalation,Cluster Option] indexterm:[Cluster,Option,shutdown-escalation] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-integration-timeout | 3min | indexterm:[crmd-integration-timeout,Cluster Option] indexterm:[Cluster,Option,crmd-integration-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-finalization-timeout | 30min | indexterm:[crmd-finalization-timeout,Cluster Option] indexterm:[Cluster,Option,crmd-finalization-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-transition-delay | 0s | indexterm:[crmd-transition-delay,Cluster Option] indexterm:[Cluster,Option,crmd-transition-delay] _Advanced Use Only:_ Delay cluster recovery for the configured interval to allow for additional/related events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. Enabling this option will slow down cluster recovery under all conditions. |default-resource-stickiness | 0 | indexterm:[default-resource-stickiness,Cluster Option] indexterm:[Cluster,Option,default-resource-stickiness] _Deprecated:_ See <> instead | is-managed-default | TRUE | indexterm:[is-managed-default,Cluster Option] indexterm:[Cluster,Option,is-managed-default] _Deprecated:_ See <> instead | default-action-timeout | 20s | indexterm:[default-action-timeout,Cluster Option] indexterm:[Cluster,Option,default-action-timeout] _Deprecated:_ See <> instead |========================================================= === Querying and Setting Cluster Options === indexterm:[Querying,Cluster Option] indexterm:[Setting,Cluster Option] indexterm:[Cluster,Querying Options] indexterm:[Cluster,Setting Options] Cluster options can be queried and modified using the `crm_attribute` tool. 
To get the current value of +cluster-delay+, you can run: ---- # crm_attribute --query --name cluster-delay ---- which is more simply written as ---- # crm_attribute -G -n cluster-delay ---- If a value is found, you'll see a result like this: ---- # crm_attribute -G -n cluster-delay scope=crm_config name=cluster-delay value=60s ---- If no value is found, the tool will display an error: ---- # crm_attribute -G -n clusta-deway scope=crm_config name=clusta-deway value=(null) Error performing operation: No such device or address ---- To use a different value (for example, 30 seconds), simply run: ---- # crm_attribute --name cluster-delay --update 30s ---- To go back to the cluster's default value, you can delete the value, for example: ---- # crm_attribute --name cluster-delay --delete Deleted crm_config option: id=cib-bootstrap-options-cluster-delay name=cluster-delay ---- === When Options are Listed More Than Once === If you ever see something like the following, it means that the option you're modifying is present more than once. .Deleting an option that is listed twice ======= ------ # crm_attribute --name batch-limit --delete Multiple attributes match name=batch-limit in crm_config: Value: 50 (set=cib-bootstrap-options, id=cib-bootstrap-options-batch-limit) Value: 100 (set=custom, id=custom-batch-limit) Please choose from one of the matches above and supply the 'id' with --id ------- ======= In such cases, follow the on-screen instructions to perform the requested action. To determine which value is currently being used by the cluster, refer to <>. diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt index 77c98885a2..dbd970b694 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt @@ -1,600 +1,600 @@ = Rules = //// We prefer [[ch-rules]], but older versions of asciidoc don't deal well with that construct for chapter headings //// anchor:ch-rules[Chapter 8, Rules] indexterm:[Resource,Constraint,Rule] Rules can be used to make your configuration more dynamic. One common example is to set one value for +resource-stickiness+ during working hours, to prevent resources from being moved back to their most preferred location, and another on weekends when no-one is around to notice an outage. Another use of rules might be to assign machines to different processing groups (using a node attribute) based on time and to then use that attribute when creating location constraints. Each rule can contain a number of expressions, date-expressions and even other rules. The results of the expressions are combined based on the rule's +boolean-op+ field to determine if the rule ultimately evaluates to +true+ or +false+. What happens next depends on the context in which the rule is being used. == Rule Properties == .Properties of a Rule [width="95%",cols="2m,1,5<",options="header",align="center"] |========================================================= |Field |Default |Description |role |+Started+ |Limits the rule to apply only when the resource is in the specified role. Allowed values are +Started+, +Slave+, and +Master+. A rule with +role="Master"+ cannot determine the initial location of a clone instance and will only affect which of the active instances will be promoted. indexterm:[role,Constraint Rule] indexterm:[Constraint,Rule,role] |score | |The score to apply if the rule evaluates to +true+. Limited to use in rules that are part of location constraints. 
indexterm:[score,Constraint Rule] indexterm:[Constraint,Rule,score] |score-attribute | |The node attribute to look up and use as a score if the rule evaluates to +true+. Limited to use in rules that are part of location constraints. indexterm:[score-attribute,Constraint Rule] indexterm:[Constraint,Rule,score-attribute] |boolean-op |+and+ |How to combine the result of multiple expression objects. Allowed values are +and+ and +or+. indexterm:[boolean-op,Constraint Rule] indexterm:[Constraint,Rule,boolean-op] |========================================================= == Node Attribute Expressions == indexterm:[Resource,Constraint,Attribute Expression] Expression objects are used to control a resource based on the attributes defined by a node or nodes. .Properties of an Expression [width="95%",cols="1m,1,5 ---- ==== .Equivalent expression ==== [source,XML] ---- ---- ==== .9am-5pm Monday-Friday ==== [source,XML] ------- ------- ==== Please note that the +16+ matches up to +16:59:59+, as the numeric value (hour) still matches! .9am-6pm Monday through Friday or anytime Saturday ==== [source,XML] ------- ------- ==== .9am-5pm or 9pm-12am Monday through Friday ==== [source,XML] ------- ------- ==== .Mondays in March 2005 ==== [source,XML] ------- ------- ==== [NOTE] ====== Because no time is specified with the above dates, 00:00:00 is implied. This means that the range includes all of 2005-03-01 but none of 2005-04-01. You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion. ====== .A full moon on Friday the 13th ===== [source,XML] ------- ------- ===== == Using Rules to Determine Resource Location == indexterm:[Rule,Determine Resource Location] indexterm:[Resource,Location,Determine by Rules] A location constraint may contain rules. When the constraint's outermost rule evaluates to +false+, the cluster treats the constraint as if it were not there. When the rule evaluates to +true+, the node's preference for running the resource is updated with the score associated with the rule. If this sounds familiar, it is because you have been using a simplified syntax for location constraint rules already. Consider the following location constraint: .Prevent myApacheRsc from running on c001n03 ===== [source,XML] ------- ------- ===== This constraint can be more verbosely written as: .Prevent myApacheRsc from running on c001n03 - expanded version ===== [source,XML] ------- ------- ===== The advantage of using the expanded form is that one can then add extra clauses to the rule, such as limiting the rule such that it only applies during certain times of the day or days of the week. === Location Rules Based on Other Node Properties === The expanded form allows us to match on node properties other than its name. If we rated each machine's CPU power such that the cluster had the following nodes section: .A sample nodes section for use with score-attribute ===== [source,XML] ------- ------- ===== then we could prevent resources from running on underpowered machines with this rule: [source,XML] ------- ------- === Using +score-attribute+ Instead of +score+ === When using +score-attribute+ instead of +score+, each node matched by the rule has its score adjusted differently, according to its value for the named node attribute. Thus, in the previous example, if a rule used +score-attribute="cpu_mips"+, +c001n01+ would have its preference to run the resource increased by +1234+ whereas +c001n02+ would have its preference increased by +5678+. 
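A sketch of such a constraint, assuming the +cpu_mips+ node attribute from the example above (the constraint and rule IDs are illustrative):
=====
[source,XML]
-------
<rsc_location id="speed-placement" rsc="myResource">
  <rule id="speed-rule" score-attribute="cpu_mips">
    <!-- Match any node that defines cpu_mips; its value becomes the score -->
    <expression id="mips-defined" attribute="cpu_mips" operation="defined"/>
  </rule>
</rsc_location>
-------
=====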
== Using Rules to Control Resource Options == Often some cluster nodes will be different from their peers. Sometimes, these differences -- e.g. the location of a binary or the names of network interfaces -- require resources to be configured differently depending on the machine they're hosted on. By defining multiple +instance_attributes+ objects for the resource and adding a rule to each, we can easily handle these special cases. In the example below, +mySpecialRsc+ will use eth1 and port 9999 when run on +node1+, eth2 and port 8888 on +node2+ and default to eth0 and port 9999 for all other nodes. .Defining different resource options based on the node name ===== [source,XML] ------- ------- ===== The order in which +instance_attributes+ objects are evaluated is determined by their score (highest to lowest). If not supplied, score defaults to zero, and objects with an equal score are processed in listed order. If the +instance_attributes+ object has no rule or a +rule+ that evaluates to +true+, then for any parameter the resource does not yet have a value for, the resource will use the parameter values defined by the +instance_attributes+. For example, given the configuration above, if the resource is placed on node1: . +special-node1+ has the highest score (3) and so is evaluated first; its rule evaluates to +true+, so +interface+ is set to +eth1+. . +special-node2+ is evaluated next with score 2, but its rule evaluates to +false+, so it is ignored. . +defaults+ is evaluated last with score 1, and has no rule, so its values are examined; +interface+ is already defined, so the value here is not used, but +port+ is not yet defined, so +port+ is set to +9999+. == Using Rules to Control Cluster Options == indexterm:[Rule,Controlling Cluster Options] indexterm:[Cluster,Setting Options with Rules] Controlling cluster options is achieved in much the same manner as specifying different resource options on different nodes. The difference is that because they are cluster options, one cannot (or should not, because they won't work) use attribute-based expressions. The following example illustrates how to set a different +resource-stickiness+ value during and outside work hours. This allows resources to automatically move back to their most preferred hosts, but at a time that (in theory) does not interfere with business activities. .Change +resource-stickiness+ during working hours ===== [source,XML] ------- ------- ===== [[s-rules-recheck]] == Ensuring Time-Based Rules Take Effect == A Pacemaker cluster is an event-driven system. As such, it won't recalculate the best place for resources to run unless something (like a resource failure or configuration change) happens. This can mean that a location constraint that only allows resource X to run between 9am and 5pm is not enforced. If you rely on time-based rules, the +cluster-recheck-interval+ cluster option (which defaults to 15 minutes) is essential. This tells the cluster to periodically recalculate the ideal state of the cluster. For example, if you set +cluster-recheck-interval="5m"+, then sometime between 09:00 and 09:05 the cluster would notice that it needs to start resource X, and between 17:00 and 17:05 it would realize that X needed to be stopped. The timing of the actual start and stop actions depends on what other actions the cluster may need to perform first. 
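To illustrate, the working-hours +resource-stickiness+ arrangement described earlier might be sketched as follows (the set names, IDs, and scores are illustrative):
=====
[source,XML]
-------
<rsc_defaults>
  <!-- Higher score: evaluated first; rule matches 9am-5pm Mon-Fri -->
  <meta_attributes id="core-hours" score="2">
    <rule id="core-hour-rule" score="0">
      <date_expression id="nine-to-five-Mon-to-Fri" operation="date_spec">
        <date_spec id="nine-to-five-Mon-to-Fri-spec" hours="9-16" weekdays="1-5"/>
      </date_expression>
    </rule>
    <nvpair id="core-stickiness" name="resource-stickiness" value="INFINITY"/>
  </meta_attributes>
  <!-- Fallback set used outside working hours -->
  <meta_attributes id="after-hours" score="1">
    <nvpair id="after-stickiness" name="resource-stickiness" value="0"/>
  </meta_attributes>
</rsc_defaults>
-------
=====
Combined with a suitably small +cluster-recheck-interval+, the switch to +INFINITY+ would be noticed shortly after 09:00, and the switch back to 0 shortly after 17:00.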
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt b/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt index 7e7cb58c5d..84053f53c4 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt @@ -1,907 +1,908 @@ = STONITH = //// We prefer [[ch-stonith]], but older versions of asciidoc don't deal well with that construct for chapter headings //// anchor:ch-stonith[Chapter 13, STONITH] indexterm:[STONITH, Configuration] == What Is STONITH? == STONITH (an acronym for "Shoot The Other Node In The Head"), also called 'fencing', protects your data from being corrupted by rogue nodes or concurrent access. Just because a node is unresponsive doesn't mean it isn't accessing your data. The only way to be 100% sure that your data is safe is to use STONITH, so we can be certain that the node is truly offline before allowing the data to be accessed from another node. STONITH also has a role to play in the event that a clustered service cannot be stopped. In this case, the cluster uses STONITH to force the whole node offline, thereby making it safe to start the service elsewhere. == What STONITH Device Should You Use? == It is crucial that the STONITH device allows the cluster to differentiate between a node failure and a network one. The biggest mistake people make in choosing a STONITH device is to use a remote power switch (such as many on-board IPMI controllers) that shares power with the node it controls. In such cases, the cluster cannot be sure if the node is really offline, or active and suffering from a network fault. Likewise, any device that relies on the machine being active (such as SSH-based "devices" used during testing) is inappropriate. == Special Treatment of STONITH Resources == STONITH resources are somewhat special in Pacemaker. STONITH may be initiated by Pacemaker or by other parts of the cluster (such as resources like DRBD or DLM). To accommodate this, Pacemaker does not require the STONITH resource to be in the 'started' state in order to be used, thus allowing reliable use of STONITH devices in such a case. [NOTE] ==== In Pacemaker versions 1.1.9 and earlier, this feature either did not exist or did not work well. Only "running" STONITH resources could be used by Pacemaker for fencing, and if another component tried to fence a node while Pacemaker was moving STONITH resources, the fencing could fail. ==== All nodes have access to STONITH devices' definitions and instantiate them on-the-fly when needed, but preference is given to 'verified' instances, which are the ones that are 'started' according to the cluster's knowledge. In the case of a cluster split, the partition with a verified instance will have a slight advantage, because the STONITH daemon in the other partition will have to hear from all its current peers before choosing a node to perform the fencing. Fencing resources do work the same as regular resources in some respects: * +target-role+ can be used to enable or disable the resource * Location constraints can be used to prevent a specific node from using the resource [IMPORTANT] =========== Currently there is a limitation that fencing resources may only have one set of meta-attributes and one set of instance attributes. This can be revisited if it becomes a significant limitation for people. =========== See the table below or run `man stonithd` to see special instance attributes that may be set for any fencing resource, regardless of fence agent.
.Properties of Fencing Resources [width="95%",cols="5m,2,3,10 ---- ==== Based on that, we would create a STONITH resource fragment that might look like this: .An IPMI-based STONITH Resource ==== [source,XML] ---- ---- ==== Finally, we need to enable STONITH: ---- # crm_attribute -t crm_config -n stonith-enabled -v true ---- == Advanced STONITH Configurations == Some people consider that having one fencing device is a single point of failure footnote:[Not true, since a node or resource must fail before fencing even has a chance to]; others prefer removing the node from the storage and network instead of turning it off. Whatever the reason, Pacemaker supports fencing nodes with multiple devices through a feature called 'fencing topologies'. Simply create the individual devices as you normally would, then define one or more +fencing-level+ entries in the +fencing-topology+ section of the configuration. * Each fencing level is attempted in order of ascending +index+. Allowed - indexes are 0 to 9. + values are 1 through 9. * If a device fails, processing terminates for the current level. No further devices in that level are exercised, and the next level is attempted instead. * If the operation succeeds for all the listed devices in a level, the level is deemed to have passed. * The operation is finished when a level has passed (success), or all levels have been attempted (failed). * If the operation failed, the next step is determined by the Policy Engine and/or `crmd`. Some possible uses of topologies include: * Try poison-pill and fail back to power * Try disk and network, and fall back to power if either fails * Initiate a kdump and then poweroff the node .Properties of Fencing Levels [width="95%",cols="1m,3<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the level indexterm:[id,fencing-level] indexterm:[Fencing,fencing-level,id] |target |The name of a single node to which this level applies indexterm:[target,fencing-level] indexterm:[Fencing,fencing-level,target] |target-pattern |A regular expression matching the names of nodes to which this level applies '(since 1.1.14)' indexterm:[target-pattern,fencing-level] indexterm:[Fencing,fencing-level,target-pattern] |target-attribute |The name of a node attribute that is set (to +target-value+) for nodes to which this level applies '(since 1.1.14)' indexterm:[target-attribute,fencing-level] indexterm:[Fencing,fencing-level,target-attribute] |target-value |The node attribute value (of +target-attribute+) that is set for nodes to which this level applies '(since 1.1.14)' indexterm:[target-attribute,fencing-level] indexterm:[Fencing,fencing-level,target-attribute] |index |The order in which to attempt the levels. Levels are attempted in ascending order 'until one succeeds'. + Valid values are 1 through 9. indexterm:[index,fencing-level] indexterm:[Fencing,fencing-level,index] |devices |A comma-separated list of devices that must all be tried for this level indexterm:[devices,fencing-level] indexterm:[Fencing,fencing-level,devices] |========================================================= .Fencing topology with different devices for different nodes ==== [source,XML] ---- ... ... 
---- ==== === Example Dual-Layer, Dual-Device Fencing Topologies === The following example illustrates an advanced use of +fencing-topology+ in a cluster with the following properties: * 3 nodes (2 active prod-mysql nodes, 1 prod_mysql-rep in standby for quorum purposes) * the active nodes have an IPMI-controlled power board reached at 192.0.2.1 and 192.0.2.2 * the active nodes also have two independent PSUs (Power Supply Units) connected to two independent PDUs (Power Distribution Units) reached at 198.51.100.1 (port 10 and port 11) and 203.0.113.1 (port 10 and port 11) * the first fencing method uses the `fence_ipmi` agent * the second fencing method uses the `fence_apc_snmp` agent targeting 2 fencing devices (one per PSU, either port 10 or 11) * fencing is only implemented for the active nodes and has location constraints * fencing topology is set to try IPMI fencing first, then fall back to a "sure-kill" dual-PDU fencing In a normal failure scenario, STONITH will first select +fence_ipmi+ to try to kill the faulty node. Using a fencing topology, if that first method fails, STONITH will then move on to selecting +fence_apc_snmp+ twice: * once for the first PDU * again for the second PDU The fence action is considered successful only if both PDUs report the required status. If any of them fails, STONITH loops back to the first fencing method, +fence_ipmi+, and so on until the node is fenced or the fencing action is cancelled. .First fencing method: single IPMI device Each cluster node has its own dedicated IPMI channel that can be called for fencing using the following primitives: [source,XML] ---- ---- .Second fencing method: dual PDU devices Each cluster node also has two distinct power channels controlled by two distinct PDUs. That means a total of 4 fencing devices configured as follows: - Node 1, PDU 1, PSU 1 @ port 10 - Node 1, PDU 2, PSU 2 @ port 10 - Node 2, PDU 1, PSU 1 @ port 11 - Node 2, PDU 2, PSU 2 @ port 11 The matching fencing agents are configured as follows: [source,XML] ---- ---- .Location Constraints To prevent STONITH from trying to run a fencing agent on the same node it is supposed to fence, constraints are placed on all the fencing primitives: [source,XML] ---- ---- .Fencing topology Now that all the fencing resources are defined, it's time to create the right topology. We want to fence using IPMI first and, if that does not work, fence both PDUs to effectively and surely kill the node. [source,XML] ---- ---- Please note that, in +fencing-topology+, the lowest +index+ value determines the priority of the first fencing method. .Final configuration Put together, the configuration looks like this: [source,XML] ---- ... ... ---- == Remapping Reboots == When the cluster needs to reboot a node, whether because +stonith-action+ is +reboot+ or because a reboot was manually requested (such as by `stonith_admin --reboot`), it will remap that to other commands in two cases: . If the chosen fencing device does not support the +reboot+ command, the cluster will ask it to perform +off+ instead. . If a fencing topology level with multiple devices must be executed, the cluster will ask all the devices to perform +off+, then ask the devices to perform +on+. To understand the second case, consider the example of a node with redundant power supplies connected to intelligent power switches. Rebooting one switch and then the other would have no effect on the node. Turning both switches off, and then on, actually reboots the node.
In such a case, the fencing operation will be treated as successful as long as the +off+ commands succeed, because then it is safe for the cluster to recover any resources that were on the node. Timeouts and errors in the +on+ phase will be logged but ignored. When a reboot operation is remapped, any action-specific timeout for the remapped action will be used (for example, +pcmk_off_timeout+ will be used when executing the +off+ command, not +pcmk_reboot_timeout+). [NOTE] ==== In Pacemaker versions 1.1.13 and earlier, reboots will not be remapped in the second case. To achieve the same effect, separate fencing devices for off and on actions must be configured. ==== diff --git a/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml b/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml index 991e002a3a..52f9236d8d 100644 --- a/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml +++ b/doc/Pacemaker_Explained/en-US/Pacemaker_Explained.xml @@ -1,47 +1,45 @@ - - Further Reading Project Website: Project Documentation: SUSE High Availability Guide: Heartbeat configuration: Corosync Configuration: diff --git a/doc/Pacemaker_Explained/en-US/Revision_History.xml b/doc/Pacemaker_Explained/en-US/Revision_History.xml index 4bd3485d26..c781a023e8 100644 --- a/doc/Pacemaker_Explained/en-US/Revision_History.xml +++ b/doc/Pacemaker_Explained/en-US/Revision_History.xml @@ -1,84 +1,96 @@ Revision History 1-0 19 Oct 2009 AndrewBeekhofandrew@beekhof.net Import from Pages.app 2-0 26 Oct 2009 AndrewBeekhofandrew@beekhof.net Cleanup and reformatting of docbook xml complete 3-0 Tue Nov 12 2009 AndrewBeekhofandrew@beekhof.net Split book into chapters and pass validation Re-organize book for use with Publican 4-0 Mon Oct 8 2012 AndrewBeekhofandrew@beekhof.net Converted to asciidoc (which is converted to docbook for use with Publican) 5-0 Mon Feb 23 2015 KenGaillotkgaillot@redhat.com Update for clarity, stylistic consistency and current command-line syntax 6-0 Tue Dec 8 2015 KenGaillotkgaillot@redhat.com Update for Pacemaker 1.1.14 7-0 Tue May 3 2016 KenGaillotkgaillot@redhat.com Update for Pacemaker 1.1.15 + + 7-1 + Fri Oct 28 2016 + KenGaillotkgaillot@redhat.com + + + + Overhaul upgrade documentation, and document node health strategies + + + + diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h index e562c66d4f..6ec796a9c8 100644 --- a/include/crm/msg_xml.h +++ b/include/crm/msg_xml.h @@ -1,428 +1,431 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef XML_TAGS__H # define XML_TAGS__H # ifndef F_ORIG # define F_ORIG "src" # endif # ifndef F_SEQ # define F_SEQ "seq" # endif # ifndef F_SUBTYPE # define F_SUBTYPE "subt" # endif # ifndef F_TYPE # define F_TYPE "t" # endif # ifndef F_CLIENTNAME # define F_CLIENTNAME "cn" # endif # ifndef F_XML_TAGNAME # define F_XML_TAGNAME "__name__" # endif # ifndef T_CRM # define T_CRM "crmd" # endif # ifndef T_ATTRD # define T_ATTRD "attrd" # endif # define CIB_OPTIONS_FIRST "cib-bootstrap-options" # define F_CRM_DATA "crm_xml" # define F_CRM_TASK "crm_task" # define F_CRM_HOST_TO "crm_host_to" # define F_CRM_MSG_TYPE F_SUBTYPE # define F_CRM_SYS_TO "crm_sys_to" # define F_CRM_SYS_FROM "crm_sys_from" # define F_CRM_HOST_FROM F_ORIG # define F_CRM_REFERENCE XML_ATTR_REFERENCE # define F_CRM_VERSION XML_ATTR_VERSION # define F_CRM_ORIGIN "origin" # define F_CRM_USER "crm_user" # define F_CRM_JOIN_ID "join_id" # define F_CRM_ELECTION_ID "election-id" # define F_CRM_ELECTION_AGE_S "election-age-sec" # define F_CRM_ELECTION_AGE_US "election-age-nano-sec" # define F_CRM_ELECTION_OWNER "election-owner" # define F_CRM_TGRAPH "crm-tgraph" # define F_CRM_TGRAPH_INPUT "crm-tgraph-in" # define F_CRM_THROTTLE_MODE "crm-limit-mode" # define F_CRM_THROTTLE_MAX "crm-limit-max" /*---- Common tags/attrs */ # define XML_DIFF_MARKER "__crm_diff_marker__" # define XML_ATTR_TAGNAME F_XML_TAGNAME # define XML_TAG_CIB "cib" # define XML_TAG_FAILED "failed" # define XML_ATTR_CRM_VERSION "crm_feature_set" # define XML_ATTR_DIGEST "digest" # define XML_ATTR_VALIDATION "validate-with" # define XML_ATTR_QUORUM_PANIC "no-quorum-panic" # define XML_ATTR_HAVE_QUORUM "have-quorum" # define XML_ATTR_HAVE_WATCHDOG "have-watchdog" # define XML_ATTR_EXPECTED_VOTES "expected-quorum-votes" # define XML_ATTR_GENERATION "epoch" # define XML_ATTR_GENERATION_ADMIN "admin_epoch" # define XML_ATTR_NUMUPDATES "num_updates" # define XML_ATTR_TIMEOUT "timeout" # define XML_ATTR_ORIGIN "crm-debug-origin" # define XML_ATTR_TSTAMP "crm-timestamp" # define XML_CIB_ATTR_WRITTEN "cib-last-written" # define XML_ATTR_VERSION "version" # define XML_ATTR_DESC "description" # define XML_ATTR_ID "id" # define XML_ATTR_IDREF "id-ref" # define XML_ATTR_ID_LONG "long-id" # define XML_ATTR_TYPE "type" # define XML_ATTR_FILTER_TYPE "type-filter" # define XML_ATTR_FILTER_ID "id-filter" # define XML_ATTR_FILTER_PRIORITY "priority-filter" # define XML_ATTR_VERBOSE "verbose" # define XML_ATTR_OP "op" # define XML_ATTR_DC "is_dc" # define XML_ATTR_DC_UUID "dc-uuid" # define XML_ATTR_UPDATE_ORIG "update-origin" # define XML_ATTR_UPDATE_CLIENT "update-client" # define XML_ATTR_UPDATE_USER "update-user" # define XML_BOOLEAN_TRUE "true" # define XML_BOOLEAN_FALSE "false" # define XML_BOOLEAN_YES XML_BOOLEAN_TRUE # define XML_BOOLEAN_NO XML_BOOLEAN_FALSE # define XML_TAG_OPTIONS "options" /*---- top level tags/attrs */ # define XML_MSG_TAG "crm_message" # define XML_MSG_TAG_DATA "msg_data" # define XML_ATTR_REQUEST "request" # define XML_ATTR_RESPONSE "response" # define XML_ATTR_UNAME "uname" # define XML_ATTR_UUID "id" # define XML_ATTR_REFERENCE "reference" # define XML_FAIL_TAG_RESOURCE "failed_resource" # define XML_FAILRES_ATTR_RESID "resource_id" # define XML_FAILRES_ATTR_REASON "reason" # define XML_FAILRES_ATTR_RESSTATUS "resource_status" # define 
XML_CRM_TAG_PING "ping_response" # define XML_PING_ATTR_STATUS "result" # define XML_PING_ATTR_SYSFROM "crm_subsystem" # define XML_TAG_FRAGMENT "cib_fragment" # define XML_ATTR_RESULT "result" # define XML_ATTR_SECTION "section" # define XML_FAIL_TAG_CIB "failed_update" # define XML_FAILCIB_ATTR_ID "id" # define XML_FAILCIB_ATTR_OBJTYPE "object_type" # define XML_FAILCIB_ATTR_OP "operation" # define XML_FAILCIB_ATTR_REASON "reason" /*---- CIB specific tags/attrs */ # define XML_CIB_TAG_SECTION_ALL "all" # define XML_CIB_TAG_CONFIGURATION "configuration" # define XML_CIB_TAG_STATUS "status" # define XML_CIB_TAG_RESOURCES "resources" # define XML_CIB_TAG_NODES "nodes" # define XML_CIB_TAG_DOMAINS "domains" # define XML_CIB_TAG_CONSTRAINTS "constraints" # define XML_CIB_TAG_CRMCONFIG "crm_config" # define XML_CIB_TAG_OPCONFIG "op_defaults" # define XML_CIB_TAG_RSCCONFIG "rsc_defaults" # define XML_CIB_TAG_ACLS "acls" # define XML_CIB_TAG_ALERTS "alerts" # define XML_CIB_TAG_ALERT "alert" # define XML_CIB_TAG_ALERT_RECIPIENT "recipient" # define XML_CIB_TAG_STATE "node_state" # define XML_CIB_TAG_NODE "node" # define XML_CIB_TAG_DOMAIN "domain" # define XML_CIB_TAG_CONSTRAINT "constraint" # define XML_CIB_TAG_NVPAIR "nvpair" # define XML_CIB_TAG_PROPSET "cluster_property_set" # define XML_TAG_ATTR_SETS "instance_attributes" # define XML_TAG_META_SETS "meta_attributes" # define XML_TAG_ATTRS "attributes" # define XML_TAG_VER_ATTRS "versioned_attributes" # define XML_TAG_PARAMS "parameters" # define XML_TAG_PARAM "param" # define XML_TAG_UTILIZATION "utilization" # define XML_TAG_RESOURCE_REF "resource_ref" # define XML_CIB_TAG_RESOURCE "primitive" # define XML_CIB_TAG_GROUP "group" # define XML_CIB_TAG_INCARNATION "clone" # define XML_CIB_TAG_MASTER "master" # define XML_CIB_TAG_RSC_TEMPLATE "template" # define XML_RSC_ATTR_ISOLATION_INSTANCE "isolation-instance" # define XML_RSC_ATTR_ISOLATION_WRAPPER "isolation-wrapper" # define XML_RSC_ATTR_ISOLATION_HOST "isolation-host" # define XML_RSC_ATTR_ISOLATION "isolation" # define XML_RSC_ATTR_RESTART "restart-type" # define XML_RSC_ATTR_ORDERED "ordered" # define XML_RSC_ATTR_INTERLEAVE "interleave" # define XML_RSC_ATTR_INCARNATION "clone" # define XML_RSC_ATTR_INCARNATION_MAX "clone-max" # define XML_RSC_ATTR_INCARNATION_MIN "clone-min" # define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max" # define XML_RSC_ATTR_MASTER_MAX "master-max" # define XML_RSC_ATTR_MASTER_NODEMAX "master-node-max" # define XML_RSC_ATTR_STATE "clone-state" # define XML_RSC_ATTR_MANAGED "is-managed" # define XML_RSC_ATTR_TARGET_ROLE "target-role" # define XML_RSC_ATTR_UNIQUE "globally-unique" # define XML_RSC_ATTR_NOTIFY "notify" # define XML_RSC_ATTR_STICKINESS "resource-stickiness" # define XML_RSC_ATTR_FAIL_STICKINESS "migration-threshold" # define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout" # define XML_RSC_ATTR_MULTIPLE "multiple-active" # define XML_RSC_ATTR_PRIORITY "priority" # define XML_RSC_ATTR_REQUIRES "requires" # define XML_RSC_ATTR_PROVIDES "provides" # define XML_RSC_ATTR_CONTAINER "container" # define XML_RSC_ATTR_INTERNAL_RSC "internal_rsc" # define XML_RSC_ATTR_MAINTENANCE "maintenance" # define XML_RSC_ATTR_REMOTE_NODE "remote-node" # define XML_REMOTE_ATTR_RECONNECT_INTERVAL "reconnect_interval" # define XML_OP_ATTR_ON_FAIL "on-fail" # define XML_OP_ATTR_START_DELAY "start-delay" # define XML_OP_ATTR_ALLOW_MIGRATE "allow-migrate" # define XML_OP_ATTR_DEPENDENT "dependent-on" # define XML_OP_ATTR_ORIGIN "interval-origin" # define 
XML_OP_ATTR_PENDING "record-pending" # define XML_CIB_TAG_LRM "lrm" # define XML_LRM_TAG_RESOURCES "lrm_resources" # define XML_LRM_TAG_RESOURCE "lrm_resource" # define XML_LRM_TAG_AGENTS "lrm_agents" # define XML_LRM_TAG_AGENT "lrm_agent" # define XML_LRM_TAG_RSC_OP "lrm_rsc_op" # define XML_AGENT_ATTR_CLASS "class" # define XML_AGENT_ATTR_PROVIDER "provider" # define XML_LRM_TAG_ATTRIBUTES "attributes" # define XML_CIB_ATTR_REPLACE "replace" # define XML_CIB_ATTR_SOURCE "source" # define XML_CIB_ATTR_HEALTH "health" # define XML_CIB_ATTR_WEIGHT "weight" # define XML_CIB_ATTR_PRIORITY "priority" # define XML_CIB_ATTR_CLEAR "clear_on" # define XML_CIB_ATTR_SOURCE "source" # define XML_NODE_JOIN_STATE "join" # define XML_NODE_EXPECTED "expected" # define XML_NODE_IN_CLUSTER "in_ccm" # define XML_NODE_IS_PEER "crmd" # define XML_NODE_IS_REMOTE "remote_node" # define XML_NODE_IS_FENCED "node_fenced" # define XML_CIB_ATTR_SHUTDOWN "shutdown" # define XML_CIB_ATTR_STONITH "stonith" /* LRM is a bit of a misnomer here; the crmd and pengine use these to track * actions, which usually but not always are LRM operations */ # define XML_LRM_ATTR_INTERVAL "interval" # define XML_LRM_ATTR_TASK "operation" # define XML_LRM_ATTR_TASK_KEY "operation_key" # define XML_LRM_ATTR_TARGET "on_node" # define XML_LRM_ATTR_TARGET_UUID "on_node_uuid" /*! Actions to be executed on Pacemaker Remote nodes are routed through * crmd on the cluster node hosting the remote connection. That cluster node * is considered the router node for the action. */ # define XML_LRM_ATTR_ROUTER_NODE "router_node" # define XML_LRM_ATTR_RSCID "rsc-id" # define XML_LRM_ATTR_OPSTATUS "op-status" # define XML_LRM_ATTR_RC "rc-code" # define XML_LRM_ATTR_CALLID "call-id" # define XML_LRM_ATTR_OP_DIGEST "op-digest" # define XML_LRM_ATTR_OP_RESTART "op-force-restart" # define XML_LRM_ATTR_OP_SECURE "op-secure-params" # define XML_LRM_ATTR_RESTART_DIGEST "op-restart-digest" # define XML_LRM_ATTR_SECURE_DIGEST "op-secure-digest" # define XML_LRM_ATTR_EXIT_REASON "exit-reason" # define XML_RSC_OP_LAST_CHANGE "last-rc-change" # define XML_RSC_OP_LAST_RUN "last-run" # define XML_RSC_OP_T_EXEC "exec-time" # define XML_RSC_OP_T_QUEUE "queue-time" # define XML_LRM_ATTR_MIGRATE_SOURCE "migrate_source" # define XML_LRM_ATTR_MIGRATE_TARGET "migrate_target" # define XML_TAG_GRAPH "transition_graph" # define XML_GRAPH_TAG_RSC_OP "rsc_op" # define XML_GRAPH_TAG_PSEUDO_EVENT "pseudo_event" # define XML_GRAPH_TAG_CRM_EVENT "crm_event" # define XML_GRAPH_TAG_DOWNED "downed" # define XML_TAG_RULE "rule" # define XML_RULE_ATTR_SCORE "score" # define XML_RULE_ATTR_SCORE_ATTRIBUTE "score-attribute" /* following has no use (hardly ever meaningful); kept for compatibility */ # define XML_RULE_ATTR_SCORE_MANGLED "score-attribute-mangled" # define XML_RULE_ATTR_ROLE "role" # define XML_RULE_ATTR_RESULT "result" # define XML_RULE_ATTR_BOOLEAN_OP "boolean-op" # define XML_TAG_EXPRESSION "expression" # define XML_EXPR_ATTR_ATTRIBUTE "attribute" # define XML_EXPR_ATTR_OPERATION "operation" # define XML_EXPR_ATTR_VALUE "value" # define XML_EXPR_ATTR_TYPE "type" # define XML_CONS_TAG_RSC_DEPEND "rsc_colocation" # define XML_CONS_TAG_RSC_ORDER "rsc_order" # define XML_CONS_TAG_RSC_LOCATION "rsc_location" # define XML_CONS_TAG_RSC_TICKET "rsc_ticket" # define XML_CONS_TAG_RSC_SET "resource_set" # define XML_CONS_ATTR_SYMMETRICAL "symmetrical" # define XML_LOCATION_ATTR_DISCOVERY "resource-discovery" # define XML_COLOC_ATTR_SOURCE "rsc" # define XML_COLOC_ATTR_SOURCE_ROLE 
"rsc-role" # define XML_COLOC_ATTR_TARGET "with-rsc" # define XML_COLOC_ATTR_TARGET_ROLE "with-rsc-role" # define XML_COLOC_ATTR_NODE_ATTR "node-attribute" # define XML_COLOC_ATTR_SOURCE_INSTANCE "rsc-instance" # define XML_COLOC_ATTR_TARGET_INSTANCE "with-rsc-instance" +# define XML_LOC_ATTR_SOURCE "rsc" +# define XML_LOC_ATTR_SOURCE_PATTERN "rsc-pattern" + # define XML_ORDER_ATTR_FIRST "first" # define XML_ORDER_ATTR_THEN "then" # define XML_ORDER_ATTR_FIRST_ACTION "first-action" # define XML_ORDER_ATTR_THEN_ACTION "then-action" # define XML_ORDER_ATTR_FIRST_INSTANCE "first-instance" # define XML_ORDER_ATTR_THEN_INSTANCE "then-instance" # define XML_ORDER_ATTR_KIND "kind" # define XML_TICKET_ATTR_TICKET "ticket" # define XML_TICKET_ATTR_LOSS_POLICY "loss-policy" # define XML_NVPAIR_ATTR_NAME "name" # define XML_NVPAIR_ATTR_VALUE "value" # define XML_NODE_ATTR_STATE "state" # define XML_NODE_ATTR_RSC_DISCOVERY "resource-discovery-enabled" # define XML_CONFIG_ATTR_DC_DEADTIME "dc-deadtime" # define XML_CONFIG_ATTR_ELECTION_FAIL "election-timeout" # define XML_CONFIG_ATTR_FORCE_QUIT "shutdown-escalation" # define XML_CONFIG_ATTR_RECHECK "cluster-recheck-interval" # define XML_ALERT_ATTR_PATH "path" # define XML_ALERT_ATTR_TIMEOUT "timeout" # define XML_ALERT_ATTR_TSTAMP_FORMAT "timestamp-format" # define XML_ALERT_ATTR_REC_VALUE "value" # define XML_CIB_TAG_GENERATION_TUPPLE "generation_tuple" # define XML_ATTR_TRANSITION_MAGIC "transition-magic" # define XML_ATTR_TRANSITION_KEY "transition-key" # define XML_ATTR_TE_NOWAIT "op_no_wait" # define XML_ATTR_TE_TARGET_RC "op_target_rc" # define XML_ATTR_LRM_PROBE "lrm-is-probe" # define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes" # define XML_TAG_DIFF_ADDED "diff-added" # define XML_TAG_DIFF_REMOVED "diff-removed" # define XML_ACL_TAG_USER "acl_target" # define XML_ACL_TAG_USERv1 "acl_user" # define XML_ACL_TAG_GROUP "acl_group" # define XML_ACL_TAG_ROLE "acl_role" # define XML_ACL_TAG_PERMISSION "acl_permission" # define XML_ACL_TAG_ROLE_REF "role" # define XML_ACL_TAG_ROLE_REFv1 "role_ref" # define XML_ACL_ATTR_KIND "kind" # define XML_ACL_TAG_READ "read" # define XML_ACL_TAG_WRITE "write" # define XML_ACL_TAG_DENY "deny" # define XML_ACL_ATTR_REF "reference" # define XML_ACL_ATTR_REFv1 "ref" # define XML_ACL_ATTR_TAG "object-type" # define XML_ACL_ATTR_TAGv1 "tag" # define XML_ACL_ATTR_XPATH "xpath" # define XML_ACL_ATTR_ATTRIBUTE "attribute" # define XML_CIB_TAG_TICKETS "tickets" # define XML_CIB_TAG_TICKET_STATE "ticket_state" # define XML_CIB_TAG_TAGS "tags" # define XML_CIB_TAG_TAG "tag" # define XML_CIB_TAG_OBJ_REF "obj_ref" # define XML_TAG_FENCING_TOPOLOGY "fencing-topology" # define XML_TAG_FENCING_LEVEL "fencing-level" # define XML_ATTR_STONITH_INDEX "index" # define XML_ATTR_STONITH_TARGET "target" # define XML_ATTR_STONITH_TARGET_VALUE "target-value" # define XML_ATTR_STONITH_TARGET_PATTERN "target-pattern" # define XML_ATTR_STONITH_TARGET_ATTRIBUTE "target-attribute" # define XML_ATTR_STONITH_DEVICES "devices" # define XML_TAG_DIFF "diff" # define XML_DIFF_VERSION "version" # define XML_DIFF_VSOURCE "source" # define XML_DIFF_VTARGET "target" # define XML_DIFF_CHANGE "change" # define XML_DIFF_LIST "change-list" # define XML_DIFF_ATTR "change-attr" # define XML_DIFF_RESULT "change-result" # define XML_DIFF_OP "operation" # define XML_DIFF_PATH "path" # define XML_DIFF_POSITION "position" /* Defined for backward API compatibility but no longer used by Pacemaker */ # define XML_ATTR_TE_ALLOWFAIL "op_allow_fail" # include # 
define ID(x) crm_element_value(x, XML_ATTR_ID) # define INSTANCE(x) crm_element_value(x, XML_CIB_ATTR_INSTANCE) # define TSTAMP(x) crm_element_value(x, XML_ATTR_TSTAMP) # define TYPE(x) crm_element_name(x) # define NAME(x) crm_element_value(x, XML_NVPAIR_ATTR_NAME) # define VALUE(x) crm_element_value(x, XML_NVPAIR_ATTR_VALUE) #endif diff --git a/pengine/constraints.c b/pengine/constraints.c index cf995dd368..eca4c25c2c 100644 --- a/pengine/constraints.c +++ b/pengine/constraints.c @@ -1,2905 +1,2905 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum pe_order_kind { pe_order_kind_optional, pe_order_kind_mandatory, pe_order_kind_serialize, }; #define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \ __rsc = pe_find_constraint_resource(data_set->resources, __name); \ if(__rsc == NULL) { \ crm_config_err("%s: No resource found for %s", __set, __name); \ return FALSE; \ } \ } while(0) enum pe_ordering get_flags(const char *id, enum pe_order_kind kind, const char *action_first, const char *action_then, gboolean invert); enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind); static rsc_to_node_t *generate_location_rule(resource_t * rsc, xmlNode * rule_xml, const char *discovery, pe_working_set_t * data_set, pe_re_match_data_t * match_data); gboolean unpack_constraints(xmlNode * xml_constraints, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; xmlNode *lifetime = NULL; for (xml_obj = __xml_first_child(xml_constraints); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *tag = crm_element_name(xml_obj); if (id == NULL) { crm_config_err("Constraint <%s...> must have an id", tag); continue; } crm_trace("Processing constraint %s %s", tag, id); lifetime = first_named_child(xml_obj, "lifetime"); if (lifetime) { crm_config_warn("Support for the lifetime tag, used by %s, is deprecated." 
" The rules it contains should instead be direct descendents of the constraint object", id); } if (test_ruleset(lifetime, NULL, data_set->now) == FALSE) { crm_info("Constraint %s %s is not active", tag, id); } else if (safe_str_eq(XML_CONS_TAG_RSC_ORDER, tag)) { unpack_rsc_order(xml_obj, data_set); } else if (safe_str_eq(XML_CONS_TAG_RSC_DEPEND, tag)) { unpack_rsc_colocation(xml_obj, data_set); } else if (safe_str_eq(XML_CONS_TAG_RSC_LOCATION, tag)) { unpack_location(xml_obj, data_set); } else if (safe_str_eq(XML_CONS_TAG_RSC_TICKET, tag)) { unpack_rsc_ticket(xml_obj, data_set); } else { pe_err("Unsupported constraint type: %s", tag); } } return TRUE; } static const char * invert_action(const char *action) { if (safe_str_eq(action, RSC_START)) { return RSC_STOP; } else if (safe_str_eq(action, RSC_STOP)) { return RSC_START; } else if (safe_str_eq(action, RSC_PROMOTE)) { return RSC_DEMOTE; } else if (safe_str_eq(action, RSC_DEMOTE)) { return RSC_PROMOTE; } else if (safe_str_eq(action, RSC_PROMOTED)) { return RSC_DEMOTED; } else if (safe_str_eq(action, RSC_DEMOTED)) { return RSC_PROMOTED; } else if (safe_str_eq(action, RSC_STARTED)) { return RSC_STOPPED; } else if (safe_str_eq(action, RSC_STOPPED)) { return RSC_STARTED; } crm_config_warn("Unknown action: %s", action); return NULL; } static enum pe_order_kind get_ordering_type(xmlNode * xml_obj) { enum pe_order_kind kind_e = pe_order_kind_mandatory; const char *kind = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND); if (kind == NULL) { const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); kind_e = pe_order_kind_mandatory; if (score) { int score_i = char2score(score); if (score_i == 0) { kind_e = pe_order_kind_optional; } /* } else if(rsc_then->variant == pe_native && rsc_first->variant > pe_group) { */ /* kind_e = pe_order_kind_optional; */ } } else if (safe_str_eq(kind, "Mandatory")) { kind_e = pe_order_kind_mandatory; } else if (safe_str_eq(kind, "Optional")) { kind_e = pe_order_kind_optional; } else if (safe_str_eq(kind, "Serialize")) { kind_e = pe_order_kind_serialize; } else { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); crm_config_err("Constraint %s: Unknown type '%s'", id, kind); } return kind_e; } static resource_t * pe_find_constraint_resource(GListPtr rsc_list, const char *id) { GListPtr rIter = NULL; for (rIter = rsc_list; id && rIter; rIter = rIter->next) { resource_t *parent = rIter->data; resource_t *match = parent->fns->find_rsc(parent, id, NULL, pe_find_renamed | pe_find_current); if (match != NULL) { if(safe_str_neq(match->id, id)) { /* We found an instance of a clone instead */ match = uber_parent(match); crm_debug("Found %s for %s", match->id, id); } return match; } } crm_trace("No match for %s", id); return NULL; } static gboolean pe_find_constraint_tag(pe_working_set_t * data_set, const char * id, tag_t ** tag) { gboolean rc = FALSE; *tag = NULL; rc = g_hash_table_lookup_extended(data_set->template_rsc_sets, id, NULL, (gpointer*) tag); if (rc == FALSE) { rc = g_hash_table_lookup_extended(data_set->tags, id, NULL, (gpointer*) tag); if (rc == FALSE) { crm_config_warn("No template/tag named '%s'", id); return FALSE; } else if (*tag == NULL) { crm_config_warn("No resource is tagged with '%s'", id); return FALSE; } } else if (*tag == NULL) { crm_config_warn("No resource is derived from template '%s'", id); return FALSE; } return rc; } static gboolean valid_resource_or_tag(pe_working_set_t * data_set, const char * id, resource_t ** rsc, tag_t ** tag) { gboolean rc = FALSE; if (rsc) { *rsc = NULL; *rsc 
= pe_find_constraint_resource(data_set->resources, id); if (*rsc) { return TRUE; } } if (tag) { *tag = NULL; rc = pe_find_constraint_tag(data_set, id, tag); } return rc; } static gboolean unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) { int order_id = 0; resource_t *rsc_then = NULL; resource_t *rsc_first = NULL; gboolean invert_bool = TRUE; int min_required_before = 0; enum pe_order_kind kind = pe_order_kind_mandatory; enum pe_ordering cons_weight = pe_order_optional; const char *id_first = NULL; const char *id_then = NULL; const char *action_then = NULL; const char *action_first = NULL; const char *instance_then = NULL; const char *instance_first = NULL; const char *require_all_s = NULL; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); crm_str_to_boolean(invert, &invert_bool); if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } else if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN); id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST); action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION); action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION); instance_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_INSTANCE); instance_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_INSTANCE); if (action_first == NULL) { action_first = RSC_START; } if (action_then == NULL) { action_then = action_first; } if (id_then == NULL || id_first == NULL) { crm_config_err("Constraint %s needs two sides lh: %s rh: %s", id, crm_str(id_then), crm_str(id_first)); return FALSE; } rsc_then = pe_find_constraint_resource(data_set->resources, id_then); rsc_first = pe_find_constraint_resource(data_set->resources, id_first); if (rsc_then == NULL) { crm_config_err("Constraint %s: no resource found for name '%s'", id, id_then); return FALSE; } else if (rsc_first == NULL) { crm_config_err("Constraint %s: no resource found for name '%s'", id, id_first); return FALSE; } else if (instance_then && rsc_then->variant < pe_clone) { crm_config_err("Invalid constraint '%s':" " Resource '%s' is not a clone but instance %s was requested", id, id_then, instance_then); return FALSE; } else if (instance_first && rsc_first->variant < pe_clone) { crm_config_err("Invalid constraint '%s':" " Resource '%s' is not a clone but instance %s was requested", id, id_first, instance_first); return FALSE; } if (instance_then) { rsc_then = find_clone_instance(rsc_then, instance_then, data_set); if (rsc_then == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_then, id_then); return FALSE; } } if (instance_first) { rsc_first = find_clone_instance(rsc_first, instance_first, data_set); if (rsc_first == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_first, id_first); return FALSE; } } require_all_s = crm_element_value(xml_obj, "require-all"); if (require_all_s && crm_is_true(require_all_s) == FALSE && rsc_first->variant >= pe_clone) { /* require-all=false means only one instance of the clone is required */ min_required_before = 1; } else if (rsc_first->variant >= pe_clone) { const char *min_clones_s = g_hash_table_lookup(rsc_first->meta, XML_RSC_ATTR_INCARNATION_MIN); if (min_clones_s) { /* if clone min is set, we require at a minimum X number of instances * to be 
runnable before allowing dependencies to be runnable. */ min_required_before = crm_parse_int(min_clones_s, "0"); } } cons_weight = pe_order_optional; kind = get_ordering_type(xml_obj); if (kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) { crm_trace("Upgrade : recovery - implies right"); cons_weight |= pe_order_implies_then; } if (invert_bool == FALSE) { cons_weight |= get_asymmetrical_flags(kind); } else { cons_weight |= get_flags(id, kind, action_first, action_then, FALSE); } /* If there is a minimum number of instances that must be runnable before * the 'then' action is runnable, we use a pseudo action as an intermediate step * start min number of clones -> pseudo action is runnable -> dependency runnable. */ if (min_required_before) { GListPtr rIter = NULL; char *task = crm_concat(CRM_OP_RELAXED_CLONE, id, ':'); action_t *unordered_action = get_pseudo_op(task, data_set); free(task); /* require the pseudo action to have "min_required_before" number of * actions to be considered runnable before allowing the pseudo action * to be runnable. */ unordered_action->required_runnable_before = min_required_before; update_action_flags(unordered_action, pe_action_requires_any, __FUNCTION__); for (rIter = rsc_first->children; id && rIter; rIter = rIter->next) { resource_t *child = rIter->data; /* order each clone instance before the pseudo action */ custom_action_order(child, generate_op_key(child->id, action_first, 0), NULL, NULL, NULL, unordered_action, pe_order_one_or_more | pe_order_implies_then_printed, data_set); } /* order the "then" dependency to occur after the pseudo action only if * the pseudo action is runnable */ order_id = custom_action_order(NULL, NULL, unordered_action, rsc_then, generate_op_key(rsc_then->id, action_then, 0), NULL, cons_weight | pe_order_runnable_left, data_set); } else { order_id = new_rsc_order(rsc_first, action_first, rsc_then, action_then, cons_weight, data_set); } pe_rsc_trace(rsc_first, "order-%d (%s): %s_%s before %s_%s flags=0x%.6x", order_id, id, rsc_first->id, action_first, rsc_then->id, action_then, cons_weight); if (invert_bool == FALSE) { return TRUE; } else if (invert && kind == pe_order_kind_serialize) { crm_config_warn("Cannot invert serialized constraint set %s", id); return TRUE; } else if (kind == pe_order_kind_serialize) { return TRUE; } action_then = invert_action(action_then); action_first = invert_action(action_first); if (action_then == NULL || action_first == NULL) { crm_config_err("Cannot invert rsc_order constraint %s." 
" Please specify the inverse manually.", id); return TRUE; } cons_weight = pe_order_optional; if (kind == pe_order_kind_optional && rsc_then->restart_type == pe_restart_restart) { crm_trace("Upgrade : recovery - implies left"); cons_weight |= pe_order_implies_first; } cons_weight |= get_flags(id, kind, action_first, action_then, TRUE); order_id = new_rsc_order(rsc_then, action_then, rsc_first, action_first, cons_weight, data_set); pe_rsc_trace(rsc_then, "order-%d (%s): %s_%s before %s_%s flags=0x%.6x", order_id, id, rsc_then->id, action_then, rsc_first->id, action_first, cons_weight); return TRUE; } static gboolean expand_tags_in_sets(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { xmlNode *new_xml = NULL; xmlNode *set = NULL; gboolean any_refs = FALSE; const char *cons_id = NULL; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } new_xml = copy_xml(xml_obj); cons_id = ID(new_xml); for (set = __xml_first_child(new_xml); set != NULL; set = __xml_next_element(set)) { xmlNode *xml_rsc = NULL; GListPtr tag_refs = NULL; GListPtr gIter = NULL; if (safe_str_neq((const char *)set->name, XML_CONS_TAG_RSC_SET)) { continue; } for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { resource_t *rsc = NULL; tag_t *tag = NULL; const char *id = ID(xml_rsc); if (safe_str_neq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF)) { continue; } if (valid_resource_or_tag(data_set, id, &rsc, &tag) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", cons_id, id); free_xml(new_xml); return FALSE; } else if (rsc) { continue; } else if (tag) { /* The resource_ref under the resource_set references a template/tag */ xmlNode *last_ref = xml_rsc; /* A sample: Original XML: Now we are appending rsc2 and rsc3 which are tagged with tag1 right after it: */ for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *obj_ref = (const char *) gIter->data; xmlNode *new_rsc_ref = NULL; new_rsc_ref = xmlNewDocRawNode(getDocPtr(set), NULL, (const xmlChar *)XML_TAG_RESOURCE_REF, NULL); crm_xml_add(new_rsc_ref, XML_ATTR_ID, obj_ref); xmlAddNextSibling(last_ref, new_rsc_ref); last_ref = new_rsc_ref; } any_refs = TRUE; /* Do not directly free ''. That would break the further __xml_next_element(xml_rsc)) and cause "Invalid read" seen by valgrind. So just record it into a hash table for freeing it later. 
*/ tag_refs = g_list_append(tag_refs, xml_rsc); } } /* Now free '', and finally get: */ for (gIter = tag_refs; gIter != NULL; gIter = gIter->next) { xmlNode *tag_ref = gIter->data; free_xml(tag_ref); } g_list_free(tag_refs); } if (any_refs) { *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } static gboolean tag_to_set(xmlNode * xml_obj, xmlNode ** rsc_set, const char * attr, gboolean convert_rsc, pe_working_set_t * data_set) { const char *cons_id = NULL; const char *id = NULL; resource_t *rsc = NULL; tag_t *tag = NULL; *rsc_set = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } if (attr == NULL) { crm_config_err("No attribute name to process."); return FALSE; } cons_id = crm_element_value(xml_obj, XML_ATTR_ID); if (cons_id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } id = crm_element_value(xml_obj, attr); if (id == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id, &rsc, &tag) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", cons_id, id); return FALSE; } else if (tag) { GListPtr gIter = NULL; /* A template/tag is referenced by the "attr" attribute (first, then, rsc or with-rsc). Add the template/tag's corresponding "resource_set" which contains the resources derived from it or tagged with it under the constraint. */ *rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET); crm_xml_add(*rsc_set, XML_ATTR_ID, id); for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *obj_ref = (const char *) gIter->data; xmlNode *rsc_ref = NULL; rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF); crm_xml_add(rsc_ref, XML_ATTR_ID, obj_ref); } /* Set sequential="false" for the resource_set */ crm_xml_add(*rsc_set, "sequential", XML_BOOLEAN_FALSE); } else if (rsc && convert_rsc) { /* Even a regular resource is referenced by "attr", convert it into a resource_set. Because the other side of the constraint could be a template/tag reference. 
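 * (Converting both sides into resource_set form keeps the later unpacking
 * logic uniform, whether a plain resource, a template or a tag was given.)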
*/ xmlNode *rsc_ref = NULL; *rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET); crm_xml_add(*rsc_set, XML_ATTR_ID, id); rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF); crm_xml_add(rsc_ref, XML_ATTR_ID, id); } else { return TRUE; } /* Remove the "attr" attribute referencing the template/tag */ if (*rsc_set) { xml_remove_prop(xml_obj, attr); } return TRUE; } static gboolean unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role, const char * score, pe_working_set_t * data_set, pe_re_match_data_t * match_data); static gboolean unpack_simple_location(xmlNode * xml_obj, pe_working_set_t * data_set) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); - const char *value = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); + const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); if(value) { resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, value); return unpack_rsc_location(xml_obj, rsc_lh, NULL, NULL, data_set, NULL); } - value = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE"-pattern"); + value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN); if(value) { regex_t *r_patt = calloc(1, sizeof(regex_t)); bool invert = FALSE; GListPtr rIter = NULL; if(value[0] == '!') { value++; invert = TRUE; } if (regcomp(r_patt, value, REG_EXTENDED)) { crm_config_err("Bad regex '%s' for constraint '%s'", value, id); regfree(r_patt); free(r_patt); return FALSE; } for (rIter = data_set->resources; rIter; rIter = rIter->next) { resource_t *r = rIter->data; int nregs = 0; regmatch_t *pmatch = NULL; int status; if(r_patt->re_nsub > 0) { nregs = r_patt->re_nsub + 1; } else { nregs = 1; } pmatch = calloc(nregs, sizeof(regmatch_t)); status = regexec(r_patt, r->id, nregs, pmatch, 0); if(invert == FALSE && status == 0) { pe_re_match_data_t match_data = { .string = r->id, .nregs = nregs, .pmatch = pmatch }; crm_debug("'%s' matched '%s' for %s", r->id, value, id); unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, &match_data); } if(invert && status != 0) { crm_debug("'%s' is an inverted match of '%s' for %s", r->id, value, id); unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, NULL); } else { crm_trace("'%s' does not match '%s' for %s", r->id, value, id); } free(pmatch); } regfree(r_patt); free(r_patt); } return FALSE; } static gboolean unpack_rsc_location(xmlNode * xml_obj, resource_t * rsc_lh, const char * role, const char * score, pe_working_set_t * data_set, pe_re_match_data_t * match_data) { gboolean empty = TRUE; rsc_to_node_t *location = NULL; - const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); + const char *id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *node = crm_element_value(xml_obj, XML_CIB_TAG_NODE); const char *discovery = crm_element_value(xml_obj, XML_LOCATION_ATTR_DISCOVERY); if (rsc_lh == NULL) { /* only a warn as BSC adds the constraint then the resource */ crm_config_warn("No resource (con=%s, rsc=%s)", id, id_lh); return FALSE; } if (score == NULL) { score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); } if (node != NULL && score != NULL) { int score_i = char2score(score); node_t *match = pe_find_node(data_set->nodes, node); if (!match) { return FALSE; } location = rsc2node_new(id, rsc_lh, score_i, discovery, match, data_set); } else { xmlNode *rule_xml = NULL; for (rule_xml = __xml_first_child(xml_obj); rule_xml != NULL; rule_xml = __xml_next_element(rule_xml)) { if (crm_str_eq((const char 
*)rule_xml->name, XML_TAG_RULE, TRUE)) { empty = FALSE; crm_trace("Unpacking %s/%s", id, ID(rule_xml)); generate_location_rule(rsc_lh, rule_xml, discovery, data_set, match_data); } } if (empty) { crm_config_err("Invalid location constraint %s:" " rsc_location must contain at least one rule", ID(xml_obj)); } } if (role == NULL) { role = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE); } if (location && role) { if (text2role(role) == RSC_ROLE_UNKNOWN) { pe_err("Invalid constraint %s: Bad role %s", id, role); return FALSE; } else { enum rsc_role_e r = text2role(role); switch(r) { case RSC_ROLE_UNKNOWN: case RSC_ROLE_STARTED: case RSC_ROLE_SLAVE: /* Applies to all */ location->role_filter = RSC_ROLE_UNKNOWN; break; default: location->role_filter = r; break; } } } return TRUE; } static gboolean unpack_location_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_lh = NULL; const char *state_lh = NULL; resource_t *rsc_lh = NULL; tag_t *tag_lh = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_lh = NULL; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_location..."); *expanded_xml = new_xml; return TRUE; } - id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); + id_lh = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE); if (id_lh == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_lh); return FALSE; } else if (rsc_lh) { /* No template is referenced. */ return TRUE; } state_lh = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "rsc" into a resource_set under the rsc_location constraint. */ - if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, FALSE, data_set) == FALSE) { + if (tag_to_set(new_xml, &rsc_set_lh, XML_LOC_ATTR_SOURCE, FALSE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_lh) { if (state_lh) { /* A "rsc-role" is specified. Move it into the converted resource_set as a "role"" attribute. 
*/ crm_xml_add(rsc_set_lh, "role", state_lh); xml_remove_prop(new_xml, XML_RULE_ATTR_ROLE); } crm_log_xml_trace(new_xml, "Expanded rsc_location..."); *expanded_xml = new_xml; } else { /* No sets */ free_xml(new_xml); } return TRUE; } static gboolean unpack_location_set(xmlNode * location, xmlNode * set, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *resource = NULL; const char *set_id; const char *role; const char *local_score; if (set == NULL) { crm_config_err("No resource_set object to process."); return FALSE; } set_id = ID(set); if (set_id == NULL) { crm_config_err("resource_set must have an id"); return FALSE; } role = crm_element_value(set, "role"); local_score = crm_element_value(set, XML_RULE_ATTR_SCORE); for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); unpack_rsc_location(location, resource, role, local_score, data_set, NULL); } } return TRUE; } gboolean unpack_location(xmlNode * xml_obj, pe_working_set_t * data_set) { xmlNode *set = NULL; gboolean any_sets = FALSE; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; if (unpack_location_tags(xml_obj, &expanded_xml, data_set) == FALSE) { return FALSE; } if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_location_set(xml_obj, set, data_set) == FALSE) { if (expanded_xml) { free_xml(expanded_xml); } return FALSE; } } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_location(xml_obj, data_set); } return TRUE; } static int get_node_score(const char *rule, const char *score, gboolean raw, node_t * node) { int score_f = 0; if (score == NULL) { pe_err("Rule %s: no score specified. 
Assuming 0.", rule); } else if (raw) { score_f = char2score(score); } else { const char *attr_score = g_hash_table_lookup(node->details->attrs, score); if (attr_score == NULL) { crm_debug("Rule %s: node %s did not have a value for %s", rule, node->details->uname, score); score_f = -INFINITY; } else { crm_debug("Rule %s: node %s had value %s for %s", rule, node->details->uname, attr_score, score); score_f = char2score(attr_score); } } return score_f; } static rsc_to_node_t * generate_location_rule(resource_t * rsc, xmlNode * rule_xml, const char *discovery, pe_working_set_t * data_set, pe_re_match_data_t * match_data) { const char *rule_id = NULL; const char *score = NULL; const char *boolean = NULL; const char *role = NULL; GListPtr gIter = NULL; GListPtr match_L = NULL; gboolean do_and = TRUE; gboolean accept = TRUE; gboolean raw_score = TRUE; gboolean score_allocated = FALSE; rsc_to_node_t *location_rule = NULL; rule_xml = expand_idref(rule_xml, data_set->input); rule_id = crm_element_value(rule_xml, XML_ATTR_ID); boolean = crm_element_value(rule_xml, XML_RULE_ATTR_BOOLEAN_OP); role = crm_element_value(rule_xml, XML_RULE_ATTR_ROLE); crm_trace("Processing rule: %s", rule_id); if (role != NULL && text2role(role) == RSC_ROLE_UNKNOWN) { pe_err("Bad role specified for %s: %s", rule_id, role); return NULL; } score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE); if (score == NULL) { score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE_ATTRIBUTE); if (score != NULL) { raw_score = FALSE; } } if (safe_str_eq(boolean, "or")) { do_and = FALSE; } location_rule = rsc2node_new(rule_id, rsc, 0, discovery, NULL, data_set); if (location_rule == NULL) { return NULL; } if (match_data && match_data->nregs > 0 && match_data->pmatch[0].rm_so != -1) { if (raw_score == FALSE) { char *result = pe_expand_re_matches(score, match_data); if (result) { score = (const char *) result; score_allocated = TRUE; } } } if (role != NULL) { crm_trace("Setting role filter: %s", role); location_rule->role_filter = text2role(role); if (location_rule->role_filter == RSC_ROLE_SLAVE) { /* Any master/slave cannot be promoted without being a slave first * Ergo, any constraint for the slave role applies to every role */ location_rule->role_filter = RSC_ROLE_UNKNOWN; } } if (do_and) { GListPtr gIter = NULL; match_L = node_list_dup(data_set->nodes, TRUE, FALSE); for (gIter = match_L; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node->weight = get_node_score(rule_id, score, raw_score, node); } } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { int score_f = 0; node_t *node = (node_t *) gIter->data; accept = pe_test_rule_re(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN, data_set->now, match_data); crm_trace("Rule %s %s on %s", ID(rule_xml), accept ? 
"passed" : "failed", node->details->uname); score_f = get_node_score(rule_id, score, raw_score, node); /* if(accept && score_f == -INFINITY) { */ /* accept = FALSE; */ /* } */ if (accept) { node_t *local = pe_find_node_id(match_L, node->details->id); if (local == NULL && do_and) { continue; } else if (local == NULL) { local = node_copy(node); match_L = g_list_append(match_L, local); } if (do_and == FALSE) { local->weight = merge_weights(local->weight, score_f); } crm_trace("node %s now has weight %d", node->details->uname, local->weight); } else if (do_and && !accept) { /* remove it */ node_t *delete = pe_find_node_id(match_L, node->details->id); if (delete != NULL) { match_L = g_list_remove(match_L, delete); crm_trace("node %s did not match", node->details->uname); } free(delete); } } if (score_allocated == TRUE) { free((char *)score); } location_rule->node_list_rh = match_L; if (location_rule->node_list_rh == NULL) { crm_trace("No matching nodes for rule %s", rule_id); return NULL; } crm_trace("%s: %d nodes matched", rule_id, g_list_length(location_rule->node_list_rh)); return location_rule; } static gint sort_cons_priority_lh(gconstpointer a, gconstpointer b) { const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t *)a; const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } CRM_ASSERT(rsc_constraint1->rsc_lh != NULL); CRM_ASSERT(rsc_constraint1->rsc_rh != NULL); if (rsc_constraint1->rsc_lh->priority > rsc_constraint2->rsc_lh->priority) { return -1; } if (rsc_constraint1->rsc_lh->priority < rsc_constraint2->rsc_lh->priority) { return 1; } /* Process clones before primitives and groups */ if (rsc_constraint1->rsc_lh->variant > rsc_constraint2->rsc_lh->variant) { return -1; } else if (rsc_constraint1->rsc_lh->variant < rsc_constraint2->rsc_lh->variant) { return 1; } return strcmp(rsc_constraint1->rsc_lh->id, rsc_constraint2->rsc_lh->id); } static gint sort_cons_priority_rh(gconstpointer a, gconstpointer b) { const rsc_colocation_t *rsc_constraint1 = (const rsc_colocation_t *)a; const rsc_colocation_t *rsc_constraint2 = (const rsc_colocation_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } CRM_ASSERT(rsc_constraint1->rsc_lh != NULL); CRM_ASSERT(rsc_constraint1->rsc_rh != NULL); if (rsc_constraint1->rsc_rh->priority > rsc_constraint2->rsc_rh->priority) { return -1; } if (rsc_constraint1->rsc_rh->priority < rsc_constraint2->rsc_rh->priority) { return 1; } /* Process clones before primitives and groups */ if (rsc_constraint1->rsc_rh->variant > rsc_constraint2->rsc_rh->variant) { return -1; } else if (rsc_constraint1->rsc_rh->variant < rsc_constraint2->rsc_rh->variant) { return 1; } return strcmp(rsc_constraint1->rsc_rh->id, rsc_constraint2->rsc_rh->id); } static void anti_colocation_order(resource_t * first_rsc, int first_role, resource_t * then_rsc, int then_role, pe_working_set_t * data_set) { const char *first_tasks[] = { NULL, NULL }; const char *then_tasks[] = { NULL, NULL }; int first_lpc = 0; int then_lpc = 0; /* Actions to make first_rsc lose first_role */ if (first_role == RSC_ROLE_MASTER) { first_tasks[0] = CRMD_ACTION_DEMOTE; } else { first_tasks[0] = CRMD_ACTION_STOP; if (first_role == RSC_ROLE_SLAVE) { first_tasks[1] = CRMD_ACTION_PROMOTE; } } /* Actions to make then_rsc gain then_role */ if (then_role == RSC_ROLE_MASTER) { then_tasks[0] = CRMD_ACTION_PROMOTE; } else { then_tasks[0] = CRMD_ACTION_START; if (then_role == RSC_ROLE_SLAVE) { then_tasks[1] = CRMD_ACTION_DEMOTE; } } 
for (first_lpc = 0; first_lpc <= 1 && first_tasks[first_lpc] != NULL; first_lpc++) { for (then_lpc = 0; then_lpc <= 1 && then_tasks[then_lpc] != NULL; then_lpc++) { new_rsc_order(first_rsc, first_tasks[first_lpc], then_rsc, then_tasks[then_lpc], pe_order_anti_colocation, data_set); } } } gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, resource_t * rsc_lh, resource_t * rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t * data_set) { rsc_colocation_t *new_con = NULL; if (rsc_lh == NULL) { crm_config_err("No resource found for LHS %s", id); return FALSE; } else if (rsc_rh == NULL) { crm_config_err("No resource found for RHS of %s", id); return FALSE; } new_con = calloc(1, sizeof(rsc_colocation_t)); if (new_con == NULL) { return FALSE; } if (state_lh == NULL || safe_str_eq(state_lh, RSC_ROLE_STARTED_S)) { state_lh = RSC_ROLE_UNKNOWN_S; } if (state_rh == NULL || safe_str_eq(state_rh, RSC_ROLE_STARTED_S)) { state_rh = RSC_ROLE_UNKNOWN_S; } new_con->id = id; new_con->rsc_lh = rsc_lh; new_con->rsc_rh = rsc_rh; new_con->score = score; new_con->role_lh = text2role(state_lh); new_con->role_rh = text2role(state_rh); new_con->node_attribute = node_attr; if (node_attr == NULL) { node_attr = "#" XML_ATTR_UNAME; } pe_rsc_trace(rsc_lh, "%s ==> %s (%s %d)", rsc_lh->id, rsc_rh->id, node_attr, score); rsc_lh->rsc_cons = g_list_insert_sorted(rsc_lh->rsc_cons, new_con, sort_cons_priority_rh); rsc_rh->rsc_cons_lhs = g_list_insert_sorted(rsc_rh->rsc_cons_lhs, new_con, sort_cons_priority_lh); data_set->colocation_constraints = g_list_append(data_set->colocation_constraints, new_con); if (score <= -INFINITY) { anti_colocation_order(rsc_lh, new_con->role_lh, rsc_rh, new_con->role_rh, data_set); anti_colocation_order(rsc_rh, new_con->role_rh, rsc_lh, new_con->role_lh, data_set); } return TRUE; } /* LHS before RHS */ int new_rsc_order(resource_t * lh_rsc, const char *lh_task, resource_t * rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t * data_set) { char *lh_key = NULL; char *rh_key = NULL; CRM_CHECK(lh_rsc != NULL, return -1); CRM_CHECK(lh_task != NULL, return -1); CRM_CHECK(rh_rsc != NULL, return -1); CRM_CHECK(rh_task != NULL, return -1); /* We no longer need to test if these reference stonith resources * now that stonithd has access to them even when they're not "running" * if (validate_order_resources(lh_rsc, lh_task, rh_rsc, rh_task)) { return -1; } */ lh_key = generate_op_key(lh_rsc->id, lh_task, 0); rh_key = generate_op_key(rh_rsc->id, rh_task, 0); return custom_action_order(lh_rsc, lh_key, NULL, rh_rsc, rh_key, NULL, type, data_set); } static char * task_from_action_or_key(action_t *action, const char *key) { char *res = NULL; char *rsc_id = NULL; char *op_type = NULL; int interval = 0; if (action) { res = strdup(action->task); } else if (key) { int rc = 0; rc = parse_op_key(key, &rsc_id, &op_type, &interval); if (rc == TRUE) { res = op_type; op_type = NULL; } free(rsc_id); free(op_type); } return res; } /* when order constraints are made between two resources start and stop actions * those constraints have to be mirrored against the corresponding * migration actions to ensure start/stop ordering is preserved during * a migration */ static void handle_migration_ordering(order_constraint_t *order, pe_working_set_t *data_set) { char *lh_task = NULL; char *rh_task = NULL; gboolean rh_migratable; gboolean lh_migratable; if (order->lh_rsc == NULL || order->rh_rsc == NULL) { return; } else if (order->lh_rsc == order->rh_rsc) { return; /* don't 
mess with those constraints built between parent * resources and the children */ } else if (is_parent(order->lh_rsc, order->rh_rsc)) { return; } else if (is_parent(order->rh_rsc, order->lh_rsc)) { return; } lh_migratable = is_set(order->lh_rsc->flags, pe_rsc_allow_migrate); rh_migratable = is_set(order->rh_rsc->flags, pe_rsc_allow_migrate); /* one of them has to be migratable for * the migrate ordering logic to be applied */ if (lh_migratable == FALSE && rh_migratable == FALSE) { return; } /* at this point we have two resources which allow migrations that have an * order dependency set between them. If those order dependencies involve * start/stop actions, we need to mirror the corresponding migrate actions * so order will be preserved. */ lh_task = task_from_action_or_key(order->lh_action, order->lh_action_task); rh_task = task_from_action_or_key(order->rh_action, order->rh_action_task); if (lh_task == NULL || rh_task == NULL) { goto cleanup_order; } if (safe_str_eq(lh_task, RSC_START) && safe_str_eq(rh_task, RSC_START)) { int flags = pe_order_optional; if (lh_migratable && rh_migratable) { /* A start then B start * A migrate_from then B migrate_to */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_MIGRATED, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); } if (rh_migratable) { if (lh_migratable) { flags |= pe_order_apply_first_non_migratable; } /* A start then B start * A start then B migrate_to... only if A start is not a part of a migration*/ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_START, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); } } else if (rh_migratable == TRUE && safe_str_eq(lh_task, RSC_STOP) && safe_str_eq(rh_task, RSC_STOP)) { int flags = pe_order_optional; if (lh_migratable) { flags |= pe_order_apply_first_non_migratable; } /* rh side is at the bottom of the stack during a stop. If we have a constraint * stop B then stop A, if B is migrating via stop/start, and A is migrating using migration actions, * we need to enforce that A's migrate_to action occurs after B's stop action. */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_STOP, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); /* We need to build the stop constraint against migrate_from as well * to account for partial migrations. 
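 * (A partial migration is one where migrate_to has completed but
 * migrate_from has not yet run; the resource then finishes moving via
 * migrate_from, so the same ordering must be applied to that action.)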
*/ if (order->rh_rsc->partial_migration_target) { custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_STOP, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATED, 0), NULL, flags, data_set); } } else if (safe_str_eq(lh_task, RSC_PROMOTE) && safe_str_eq(rh_task, RSC_START)) { int flags = pe_order_optional; if (rh_migratable) { /* A promote then B start * A promote then B migrate_to */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_PROMOTE, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); } } else if (safe_str_eq(lh_task, RSC_DEMOTE) && safe_str_eq(rh_task, RSC_STOP)) { int flags = pe_order_optional; if (rh_migratable) { /* A demote then B stop * A demote then B migrate_to */ custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_DEMOTE, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATE, 0), NULL, flags, data_set); /* We need to build the demote constraint against migrate_from as well * to account for partial migrations. */ if (order->rh_rsc->partial_migration_target) { custom_action_order(order->lh_rsc, generate_op_key(order->lh_rsc->id, RSC_DEMOTE, 0), NULL, order->rh_rsc, generate_op_key(order->rh_rsc->id, RSC_MIGRATED, 0), NULL, flags, data_set); } } } cleanup_order: free(lh_task); free(rh_task); } /* LHS before RHS */ int custom_action_order(resource_t * lh_rsc, char *lh_action_task, action_t * lh_action, resource_t * rh_rsc, char *rh_action_task, action_t * rh_action, enum pe_ordering type, pe_working_set_t * data_set) { order_constraint_t *order = NULL; if (lh_rsc == NULL && lh_action) { lh_rsc = lh_action->rsc; } if (rh_rsc == NULL && rh_action) { rh_rsc = rh_action->rsc; } if ((lh_action == NULL && lh_rsc == NULL) || (rh_action == NULL && rh_rsc == NULL)) { crm_config_err("Invalid inputs %p.%p %p.%p", lh_rsc, lh_action, rh_rsc, rh_action); free(lh_action_task); free(rh_action_task); return -1; } order = calloc(1, sizeof(order_constraint_t)); crm_trace("Creating[%d] %s %s %s - %s %s %s", data_set->order_id, lh_rsc?lh_rsc->id:"NA", lh_action_task, lh_action?lh_action->uuid:"NA", rh_rsc?rh_rsc->id:"NA", rh_action_task, rh_action?rh_action->uuid:"NA"); /* CRM_ASSERT(data_set->order_id != 291); */ order->id = data_set->order_id++; order->type = type; order->lh_rsc = lh_rsc; order->rh_rsc = rh_rsc; order->lh_action = lh_action; order->rh_action = rh_action; order->lh_action_task = lh_action_task; order->rh_action_task = rh_action_task; if (order->lh_action_task == NULL && lh_action) { order->lh_action_task = strdup(lh_action->uuid); } if (order->rh_action_task == NULL && rh_action) { order->rh_action_task = strdup(rh_action->uuid); } if (order->lh_rsc == NULL && lh_action) { order->lh_rsc = lh_action->rsc; } if (order->rh_rsc == NULL && rh_action) { order->rh_rsc = rh_action->rsc; } data_set->ordering_constraints = g_list_prepend(data_set->ordering_constraints, order); handle_migration_ordering(order, data_set); return order->id; } enum pe_ordering get_asymmetrical_flags(enum pe_order_kind kind) { enum pe_ordering flags = pe_order_optional; if (kind == pe_order_kind_mandatory) { flags |= pe_order_asymmetrical; } else if (kind == pe_order_kind_serialize) { flags |= pe_order_serialize_only; } return flags; } enum pe_ordering get_flags(const char *id, enum pe_order_kind kind, const char *action_first, const char *action_then, gboolean invert) { enum pe_ordering flags = pe_order_optional; if (invert && kind == pe_order_kind_mandatory) 
{ crm_trace("Upgrade %s: implies left", id); flags |= pe_order_implies_first; } else if (kind == pe_order_kind_mandatory) { crm_trace("Upgrade %s: implies right", id); flags |= pe_order_implies_then; if (safe_str_eq(action_first, RSC_START) || safe_str_eq(action_first, RSC_PROMOTE)) { crm_trace("Upgrade %s: runnable", id); flags |= pe_order_runnable_left; } } else if (kind == pe_order_kind_serialize) { flags |= pe_order_serialize_only; } return flags; } static gboolean unpack_order_set(xmlNode * set, enum pe_order_kind kind, resource_t ** rsc, action_t ** begin, action_t ** end, action_t ** inv_begin, action_t ** inv_end, const char *symmetrical, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; GListPtr set_iter = NULL; GListPtr resources = NULL; resource_t *last = NULL; resource_t *resource = NULL; int local_kind = kind; gboolean sequential = FALSE; enum pe_ordering flags = pe_order_optional; char *key = NULL; const char *id = ID(set); const char *action = crm_element_value(set, "action"); const char *sequential_s = crm_element_value(set, "sequential"); const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND); /* char *pseudo_id = NULL; char *end_id = NULL; char *begin_id = NULL; */ if (action == NULL) { action = RSC_START; } if (kind_s) { local_kind = get_ordering_type(set); } if (sequential_s == NULL) { sequential_s = "1"; } sequential = crm_is_true(sequential_s); if (crm_is_true(symmetrical)) { flags = get_flags(id, local_kind, action, action, FALSE); } else { flags = get_asymmetrical_flags(local_kind); } for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, resource, ID(xml_rsc)); resources = g_list_append(resources, resource); } } if (g_list_length(resources) == 1) { crm_trace("Single set: %s", id); *rsc = resource; *end = NULL; *begin = NULL; *inv_end = NULL; *inv_begin = NULL; goto done; } /* pseudo_id = crm_concat(id, action, '-'); end_id = crm_concat(pseudo_id, "end", '-'); begin_id = crm_concat(pseudo_id, "begin", '-'); */ *rsc = NULL; /* *end = get_pseudo_op(end_id, data_set); *begin = get_pseudo_op(begin_id, data_set); free(pseudo_id); free(begin_id); free(end_id); */ set_iter = resources; while (set_iter != NULL) { resource = (resource_t *) set_iter->data; set_iter = set_iter->next; key = generate_op_key(resource->id, action, 0); /* custom_action_order(NULL, NULL, *begin, resource, strdup(key), NULL, flags|pe_order_implies_first_printed, data_set); custom_action_order(resource, strdup(key), NULL, NULL, NULL, *end, flags|pe_order_implies_then_printed, data_set); */ if (local_kind == pe_order_kind_serialize) { /* Serialize before everything that comes after */ GListPtr gIter = NULL; for (gIter = set_iter; gIter != NULL; gIter = gIter->next) { resource_t *then_rsc = (resource_t *) gIter->data; char *then_key = generate_op_key(then_rsc->id, action, 0); custom_action_order(resource, strdup(key), NULL, then_rsc, then_key, NULL, flags, data_set); } } else if (sequential) { if (last != NULL) { new_rsc_order(last, action, resource, action, flags, data_set); } last = resource; } free(key); } if (crm_is_true(symmetrical) == FALSE) { goto done; } else if (symmetrical && local_kind == pe_order_kind_serialize) { crm_config_warn("Cannot invert serialized constraint set %s", id); goto done; } else if (local_kind == pe_order_kind_serialize) { goto done; } last = NULL; action = invert_action(action); /* pseudo_id = crm_concat(id, 
action, '-'); end_id = crm_concat(pseudo_id, "end", '-'); begin_id = crm_concat(pseudo_id, "begin", '-'); *inv_end = get_pseudo_op(end_id, data_set); *inv_begin = get_pseudo_op(begin_id, data_set); free(pseudo_id); free(begin_id); free(end_id); */ flags = get_flags(id, local_kind, action, action, TRUE); set_iter = resources; while (set_iter != NULL) { resource = (resource_t *) set_iter->data; set_iter = set_iter->next; /* key = generate_op_key(resource->id, action, 0); custom_action_order(NULL, NULL, *begin, resource, strdup(key), NULL, flags|pe_order_implies_first_printed, data_set); custom_action_order(resource, key, NULL, NULL, NULL, *inv_end, flags|pe_order_implies_then_printed, data_set); */ if (sequential) { if (last != NULL) { new_rsc_order(resource, action, last, action, flags, data_set); } last = resource; } } done: g_list_free(resources); return TRUE; } static gboolean order_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, enum pe_order_kind kind, pe_working_set_t * data_set, gboolean invert, gboolean symmetrical) { xmlNode *xml_rsc = NULL; xmlNode *xml_rsc_2 = NULL; resource_t *rsc_1 = NULL; resource_t *rsc_2 = NULL; const char *action_1 = crm_element_value(set1, "action"); const char *action_2 = crm_element_value(set2, "action"); const char *sequential_1 = crm_element_value(set1, "sequential"); const char *sequential_2 = crm_element_value(set2, "sequential"); const char *require_all_s = crm_element_value(set1, "require-all"); gboolean require_all = require_all_s ? crm_is_true(require_all_s) : TRUE; enum pe_ordering flags = pe_order_none; if (action_1 == NULL) { action_1 = RSC_START; }; if (action_2 == NULL) { action_2 = RSC_START; }; if (invert) { action_1 = invert_action(action_1); action_2 = invert_action(action_2); } if(safe_str_eq(RSC_STOP, action_1) || safe_str_eq(RSC_DEMOTE, action_1)) { /* Assuming: A -> ( B || C) -> D * The one-or-more logic only applies during the start/promote phase. * During shutdown, neither B nor C can shut down until D is down, so simply turn require_all back on. */ require_all = TRUE; } if (symmetrical == FALSE) { flags = get_asymmetrical_flags(kind); } else { flags = get_flags(id, kind, action_2, action_1, invert); } /* If we have an un-ordered set1, whether it is sequential or not is irrelevant with regard to set2. */ if (!require_all) { char *task = crm_concat(CRM_OP_RELAXED_SET, ID(set1), ':'); action_t *unordered_action = get_pseudo_op(task, data_set); free(task); update_action_flags(unordered_action, pe_action_requires_any, __FUNCTION__); for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (!crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { continue; } EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); /* Add an ordering constraint between every element in set1 and the pseudo action. * If any action in set1 is runnable the pseudo action will be runnable. */ custom_action_order(rsc_1, generate_op_key(rsc_1->id, action_1, 0), NULL, NULL, NULL, unordered_action, pe_order_one_or_more | pe_order_implies_then_printed, data_set); } for (xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) { if (!crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { continue; } EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); /* Add an ordering constraint between the pseudo action and every element in set2.
* If the pseudo action is runnable, every action in set2 will be runnable */ custom_action_order(NULL, NULL, unordered_action, rsc_2, generate_op_key(rsc_2->id, action_2, 0), NULL, flags | pe_order_runnable_left, data_set); } return TRUE; } if (crm_is_true(sequential_1)) { if (invert == FALSE) { /* get the last one */ const char *rid = NULL; for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_1, rid); } else { /* get the first one */ for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); break; } } } } if (crm_is_true(sequential_2)) { if (invert == FALSE) { /* get the first one */ for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); break; } } } else { /* get the last one */ const char *rid = NULL; for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); } } if (rsc_1 != NULL && rsc_2 != NULL) { new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } else if (rsc_1 != NULL) { for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } else if (rsc_2 != NULL) { xmlNode *xml_rsc = NULL; for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } else { for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_2 = NULL; EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); for (xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) { if (crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); new_rsc_order(rsc_1, action_1, rsc_2, action_2, flags, data_set); } } } } } return TRUE; } static gboolean unpack_order_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_first = NULL; const char *id_then = NULL; const char *action_first = NULL; const char *action_then = NULL; resource_t *rsc_first = NULL; resource_t *rsc_then = NULL; tag_t *tag_first = NULL; tag_t *tag_then = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_first = NULL; xmlNode *rsc_set_then = NULL; gboolean any_sets = FALSE; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; 
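/* A rough sketch of the rewrite this function performs (resource and tag
 * names here are hypothetical, and the exact generated ids are an
 * implementation detail): an order constraint whose "first" references a
 * tag, e.g.
 *
 *   <rsc_order id="order-1" first="db-tag" first-action="start"
 *              then="web" then-action="start"/>
 *
 * is expanded so the tag's members appear as <resource_ref/> entries in a
 * <resource_set/> under the constraint, with "first-action" moved onto the
 * set as its "action" attribute (see the tag_to_set() calls below).
 */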
} /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates/tags. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_order..."); *expanded_xml = new_xml; return TRUE; } id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST); id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN); if (id_first == NULL || id_then == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_first, &rsc_first, &tag_first) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_first); return FALSE; } if (valid_resource_or_tag(data_set, id_then, &rsc_then, &tag_then) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_then); return FALSE; } if (rsc_first && rsc_then) { /* Neither side references any template/tag. */ return TRUE; } action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION); action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "first" into a resource_set under the order constraint. */ if (tag_to_set(new_xml, &rsc_set_first, XML_ORDER_ATTR_FIRST, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_first) { if (action_first) { /* A "first-action" is specified. Move it into the converted resource_set as an "action" attribute. */ crm_xml_add(rsc_set_first, "action", action_first); xml_remove_prop(new_xml, XML_ORDER_ATTR_FIRST_ACTION); } any_sets = TRUE; } /* Convert the template/tag reference in "then" into a resource_set under the order constraint. */ if (tag_to_set(new_xml, &rsc_set_then, XML_ORDER_ATTR_THEN, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_then) { if (action_then) { /* A "then-action" is specified. Move it into the converted resource_set as an "action" attribute. 
*/ crm_xml_add(rsc_set_then, "action", action_then); xml_remove_prop(new_xml, XML_ORDER_ATTR_THEN_ACTION); } any_sets = TRUE; } if (any_sets) { crm_log_xml_trace(new_xml, "Expanded rsc_order..."); *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } gboolean unpack_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) { gboolean any_sets = FALSE; resource_t *rsc = NULL; /* resource_t *last_rsc = NULL; */ action_t *set_end = NULL; action_t *set_begin = NULL; action_t *set_inv_end = NULL; action_t *set_inv_begin = NULL; xmlNode *set = NULL; xmlNode *last = NULL; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; /* action_t *last_end = NULL; action_t *last_begin = NULL; action_t *last_inv_end = NULL; action_t *last_inv_begin = NULL; */ const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); enum pe_order_kind kind = get_ordering_type(xml_obj); gboolean invert_bool = TRUE; gboolean rc = TRUE; if (invert == NULL) { invert = "true"; } invert_bool = crm_is_true(invert); rc = unpack_order_tags(xml_obj, &expanded_xml, data_set); if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } else if (rc == FALSE) { return FALSE; } for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_order_set(set, kind, &rsc, &set_begin, &set_end, &set_inv_begin, &set_inv_end, invert, data_set) == FALSE) { return FALSE; /* Expand orders in order_rsc_sets() instead of via pseudo actions. */ /* } else if(last) { const char *set_action = crm_element_value(set, "action"); const char *last_action = crm_element_value(last, "action"); enum pe_ordering flags = get_flags(id, kind, last_action, set_action, FALSE); if(!set_action) { set_action = RSC_START; } if(!last_action) { last_action = RSC_START; } if(rsc == NULL && last_rsc == NULL) { order_actions(last_end, set_begin, flags); } else { custom_action_order( last_rsc, null_or_opkey(last_rsc, last_action), last_end, rsc, null_or_opkey(rsc, set_action), set_begin, flags, data_set); } if(crm_is_true(invert)) { set_action = invert_action(set_action); last_action = invert_action(last_action); flags = get_flags(id, kind, last_action, set_action, TRUE); if(rsc == NULL && last_rsc == NULL) { order_actions(last_inv_begin, set_inv_end, flags); } else { custom_action_order( last_rsc, null_or_opkey(last_rsc, last_action), last_inv_begin, rsc, null_or_opkey(rsc, set_action), set_inv_end, flags, data_set); } } */ } else if ( /* formerly unreachable -- now used so that clones in resource sets are supported */ last) { if (order_rsc_sets(id, last, set, kind, data_set, FALSE, invert_bool) == FALSE) { return FALSE; } if (invert_bool && order_rsc_sets(id, set, last, kind, data_set, TRUE, invert_bool) == FALSE) { return FALSE; } } last = set; /* last_rsc = rsc; last_end = set_end; last_begin = set_begin; last_inv_end = set_inv_end; last_inv_begin = set_inv_begin; */ } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_rsc_order(xml_obj, data_set); } return TRUE; } static gboolean unpack_colocation_set(xmlNode * set, int score, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *with = NULL; resource_t *resource = NULL; const char *set_id = ID(set); const char *role = crm_element_value(set, "role"); const char *sequential = crm_element_value(set,
"sequential"); const char *ordering = crm_element_value(set, "ordering"); int local_score = score; const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE); if (score_s) { local_score = char2score(score_s); } if(ordering == NULL) { ordering = "group"; } if (sequential != NULL && crm_is_true(sequential) == FALSE) { return TRUE; } else if (local_score >= 0 && safe_str_eq(ordering, "group")) { for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); if (with != NULL) { pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id); rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, data_set); } with = resource; } } } else if (local_score >= 0) { resource_t *last = NULL; for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); if (last != NULL) { pe_rsc_trace(resource, "Colocating %s with %s", last->id, resource->id); rsc_colocation_new(set_id, NULL, local_score, last, resource, role, role, data_set); } last = resource; } } } else { /* Anti-colocating with every prior resource is * the only way to ensure the intuitive result * (ie. that no-one in the set can run with anyone * else in the set) */ for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_with = NULL; EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); for (xml_rsc_with = __xml_first_child(set); xml_rsc_with != NULL; xml_rsc_with = __xml_next_element(xml_rsc_with)) { if (crm_str_eq((const char *)xml_rsc_with->name, XML_TAG_RESOURCE_REF, TRUE)) { if (safe_str_eq(resource->id, ID(xml_rsc_with))) { break; } else if (resource == NULL) { crm_config_err("%s: No resource found for %s", set_id, ID(xml_rsc_with)); return FALSE; } EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with)); pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id, with->id); rsc_colocation_new(set_id, NULL, local_score, resource, with, role, role, data_set); } } } } } return TRUE; } static gboolean colocate_rsc_sets(const char *id, xmlNode * set1, xmlNode * set2, int score, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *rsc_1 = NULL; resource_t *rsc_2 = NULL; const char *role_1 = crm_element_value(set1, "role"); const char *role_2 = crm_element_value(set2, "role"); const char *sequential_1 = crm_element_value(set1, "sequential"); const char *sequential_2 = crm_element_value(set2, "sequential"); if (sequential_1 == NULL || crm_is_true(sequential_1)) { /* get the first one */ for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); break; } } } if (sequential_2 == NULL || crm_is_true(sequential_2)) { /* get the last one */ const char *rid = NULL; for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { rid = ID(xml_rsc); } } EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid); } if (rsc_1 != NULL && rsc_2 != NULL) { rsc_colocation_new(id, NULL, score, rsc_1, 
rsc_2, role_1, role_2, data_set); } else if (rsc_1 != NULL) { for (xml_rsc = __xml_first_child(set2); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } else if (rsc_2 != NULL) { for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } else { for (xml_rsc = __xml_first_child(set1); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { xmlNode *xml_rsc_2 = NULL; EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc)); for (xml_rsc_2 = __xml_first_child(set2); xml_rsc_2 != NULL; xml_rsc_2 = __xml_next_element(xml_rsc_2)) { if (crm_str_eq((const char *)xml_rsc_2->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2)); rsc_colocation_new(id, NULL, score, rsc_1, rsc_2, role_1, role_2, data_set); } } } } } return TRUE; } static gboolean unpack_simple_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) { int score_i = 0; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); const char *id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET); const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); const char *state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE); const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE); const char *instance_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_INSTANCE); const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR); const char *symmetrical = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL); resource_t *rsc_lh = pe_find_constraint_resource(data_set->resources, id_lh); resource_t *rsc_rh = pe_find_constraint_resource(data_set->resources, id_rh); if (rsc_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_lh); return FALSE; } else if (rsc_rh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_rh); return FALSE; } else if (instance_lh && rsc_lh->variant < pe_clone) { crm_config_err ("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_lh, instance_lh); return FALSE; } else if (instance_rh && rsc_rh->variant < pe_clone) { crm_config_err ("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_rh, instance_rh); return FALSE; } if (instance_lh) { rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set); if (rsc_lh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_lh, id_lh); return FALSE; } } if (instance_rh) { rsc_rh = find_clone_instance(rsc_rh, instance_rh, data_set); if (rsc_rh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_rh, id_rh); return FALSE; } } if (crm_is_true(symmetrical)) { crm_config_warn("The %s colocation constraint attribute has been removed." 
" It didn't do what you think it did anyway.", XML_CONS_ATTR_SYMMETRICAL); } if (score) { score_i = char2score(score); } rsc_colocation_new(id, attr, score_i, rsc_lh, rsc_rh, state_lh, state_rh, data_set); return TRUE; } static gboolean unpack_colocation_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_lh = NULL; const char *id_rh = NULL; const char *state_lh = NULL; const char *state_rh = NULL; resource_t *rsc_lh = NULL; resource_t *rsc_rh = NULL; tag_t *tag_lh = NULL; tag_t *tag_rh = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_lh = NULL; xmlNode *rsc_set_rh = NULL; gboolean any_sets = FALSE; *expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates/tags. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_colocation..."); *expanded_xml = new_xml; return TRUE; } id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); id_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET); if (id_lh == NULL || id_rh == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_lh); return FALSE; } if (valid_resource_or_tag(data_set, id_rh, &rsc_rh, &tag_rh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_rh); return FALSE; } if (rsc_lh && rsc_rh) { /* Neither side references any template/tag. */ return TRUE; } if (tag_lh && tag_rh) { /* A colocation constraint between two templates/tags makes no sense. */ crm_config_err("Either LHS or RHS of %s should be a normal resource instead of a template/tag", id); return FALSE; } state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); state_rh = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "rsc" into a resource_set under the colocation constraint. */ if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_lh) { if (state_lh) { /* A "rsc-role" is specified. Move it into the converted resource_set as a "role"" attribute. */ crm_xml_add(rsc_set_lh, "role", state_lh); xml_remove_prop(new_xml, XML_COLOC_ATTR_SOURCE_ROLE); } any_sets = TRUE; } /* Convert the template/tag reference in "with-rsc" into a resource_set under the colocation constraint. */ if (tag_to_set(new_xml, &rsc_set_rh, XML_COLOC_ATTR_TARGET, TRUE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_rh) { if (state_rh) { /* A "with-rsc-role" is specified. Move it into the converted resource_set as a "role"" attribute. 
*/ crm_xml_add(rsc_set_rh, "role", state_rh); xml_remove_prop(new_xml, XML_COLOC_ATTR_TARGET_ROLE); } any_sets = TRUE; } if (any_sets) { crm_log_xml_trace(new_xml, "Expanded rsc_colocation..."); *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } gboolean unpack_rsc_colocation(xmlNode * xml_obj, pe_working_set_t * data_set) { int score_i = 0; xmlNode *set = NULL; xmlNode *last = NULL; gboolean any_sets = FALSE; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); gboolean rc = TRUE; if (score) { score_i = char2score(score); } rc = unpack_colocation_tags(xml_obj, &expanded_xml, data_set); if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } else if (rc == FALSE) { return FALSE; } for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_colocation_set(set, score_i, data_set) == FALSE) { return FALSE; } else if (last && colocate_rsc_sets(id, last, set, score_i, data_set) == FALSE) { return FALSE; } last = set; } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_colocation(xml_obj, data_set); } return TRUE; } gboolean rsc_ticket_new(const char *id, resource_t * rsc_lh, ticket_t * ticket, const char *state_lh, const char *loss_policy, pe_working_set_t * data_set) { rsc_ticket_t *new_rsc_ticket = NULL; if (rsc_lh == NULL) { crm_config_err("No resource found for LHS %s", id); return FALSE; } new_rsc_ticket = calloc(1, sizeof(rsc_ticket_t)); if (new_rsc_ticket == NULL) { return FALSE; } if (state_lh == NULL || safe_str_eq(state_lh, RSC_ROLE_STARTED_S)) { state_lh = RSC_ROLE_UNKNOWN_S; } new_rsc_ticket->id = id; new_rsc_ticket->ticket = ticket; new_rsc_ticket->rsc_lh = rsc_lh; new_rsc_ticket->role_lh = text2role(state_lh); if (safe_str_eq(loss_policy, "fence")) { crm_debug("On loss of ticket '%s': Fence the nodes running %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_fence; } else if (safe_str_eq(loss_policy, "freeze")) { crm_debug("On loss of ticket '%s': Freeze %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_freeze; } else if (safe_str_eq(loss_policy, "demote")) { crm_debug("On loss of ticket '%s': Demote %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_demote; } else if (safe_str_eq(loss_policy, "stop")) { crm_debug("On loss of ticket '%s': Stop %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_stop; } else { if (new_rsc_ticket->role_lh == RSC_ROLE_MASTER) { crm_debug("On loss of ticket '%s': Default to demote %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_demote; } else { crm_debug("On loss of ticket '%s': Default to stop %s (%s)", new_rsc_ticket->ticket->id, new_rsc_ticket->rsc_lh->id, role2text(new_rsc_ticket->role_lh)); new_rsc_ticket->loss_policy = loss_ticket_stop; } } pe_rsc_trace(rsc_lh, "%s (%s) ==> %s", rsc_lh->id, 
role2text(new_rsc_ticket->role_lh), ticket->id); rsc_lh->rsc_tickets = g_list_append(rsc_lh->rsc_tickets, new_rsc_ticket); data_set->ticket_constraints = g_list_append(data_set->ticket_constraints, new_rsc_ticket); if (new_rsc_ticket->ticket->granted == FALSE || new_rsc_ticket->ticket->standby) { rsc_ticket_constraint(rsc_lh, new_rsc_ticket, data_set); } return TRUE; } static gboolean unpack_rsc_ticket_set(xmlNode * set, ticket_t * ticket, const char *loss_policy, pe_working_set_t * data_set) { xmlNode *xml_rsc = NULL; resource_t *resource = NULL; const char *set_id = ID(set); const char *role = crm_element_value(set, "role"); if (set == NULL) { crm_config_err("No resource_set object to process."); return FALSE; } if (set_id == NULL) { crm_config_err("resource_set must have an id"); return FALSE; } if (ticket == NULL) { crm_config_err("No dependent ticket specified for '%s'", set_id); return FALSE; } for (xml_rsc = __xml_first_child(set); xml_rsc != NULL; xml_rsc = __xml_next_element(xml_rsc)) { if (crm_str_eq((const char *)xml_rsc->name, XML_TAG_RESOURCE_REF, TRUE)) { EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc)); pe_rsc_trace(resource, "Resource '%s' depends on ticket '%s'", resource->id, ticket->id); rsc_ticket_new(set_id, resource, ticket, role, loss_policy, data_set); } } return TRUE; } static gboolean unpack_simple_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set) { const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET); const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY); ticket_t *ticket = NULL; const char *id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); const char *state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); const char *instance_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_INSTANCE); resource_t *rsc_lh = NULL; if (xml_obj == NULL) { crm_config_err("No rsc_ticket constraint object to process."); return FALSE; } if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } if (ticket_str == NULL) { crm_config_err("Invalid constraint '%s': No ticket specified", id); return FALSE; } else { ticket = g_hash_table_lookup(data_set->tickets, ticket_str); } if (ticket == NULL) { crm_config_err("Invalid constraint '%s': No ticket named '%s'", id, ticket_str); return FALSE; } if (id_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource specified", id); return FALSE; } else { rsc_lh = pe_find_constraint_resource(data_set->resources, id_lh); } if (rsc_lh == NULL) { crm_config_err("Invalid constraint '%s': No resource named '%s'", id, id_lh); return FALSE; } else if (instance_lh && rsc_lh->variant < pe_clone) { crm_config_err ("Invalid constraint '%s': Resource '%s' is not a clone but instance %s was requested", id, id_lh, instance_lh); return FALSE; } if (instance_lh) { rsc_lh = find_clone_instance(rsc_lh, instance_lh, data_set); if (rsc_lh == NULL) { crm_config_warn("Invalid constraint '%s': No instance '%s' of '%s'", id, instance_lh, id_lh); return FALSE; } } rsc_ticket_new(id, rsc_lh, ticket, state_lh, loss_policy, data_set); return TRUE; } static gboolean unpack_rsc_ticket_tags(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set) { const char *id = NULL; const char *id_lh = NULL; const char *state_lh = NULL; resource_t *rsc_lh = NULL; tag_t *tag_lh = NULL; xmlNode *new_xml = NULL; xmlNode *rsc_set_lh = NULL; gboolean any_sets = FALSE;
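/* As with the order and colocation variants above, this function expands
 * template/tag references. A hypothetical example (names invented purely
 * for illustration): in
 *
 *   <rsc_ticket id="tickets-1" rsc="db-tag" ticket="ticketA" loss-policy="stop"/>
 *
 * a "rsc" that names a tag is converted into a resource_set under the
 * constraint, and any "rsc-role" is moved onto that set as its "role"
 * attribute.
 */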
*expanded_xml = NULL; if (xml_obj == NULL) { crm_config_err("No constraint object to process."); return FALSE; } id = crm_element_value(xml_obj, XML_ATTR_ID); if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } /* Attempt to expand any template/tag references in possible resource sets. */ expand_tags_in_sets(xml_obj, &new_xml, data_set); if (new_xml) { /* There are resource sets referencing templates/tags. Return with the expanded XML. */ crm_log_xml_trace(new_xml, "Expanded rsc_ticket..."); *expanded_xml = new_xml; return TRUE; } id_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE); if (id_lh == NULL) { return TRUE; } if (valid_resource_or_tag(data_set, id_lh, &rsc_lh, &tag_lh) == FALSE) { crm_config_err("Constraint '%s': Invalid reference to '%s'", id, id_lh); return FALSE; } else if (rsc_lh) { /* No template/tag is referenced. */ return TRUE; } state_lh = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE); new_xml = copy_xml(xml_obj); /* Convert the template/tag reference in "rsc" into a resource_set under the rsc_ticket constraint. */ if (tag_to_set(new_xml, &rsc_set_lh, XML_COLOC_ATTR_SOURCE, FALSE, data_set) == FALSE) { free_xml(new_xml); return FALSE; } if (rsc_set_lh) { if (state_lh) { /* A "rsc-role" is specified. Move it into the converted resource_set as a "role" attribute. */ crm_xml_add(rsc_set_lh, "role", state_lh); xml_remove_prop(new_xml, XML_COLOC_ATTR_SOURCE_ROLE); } any_sets = TRUE; } if (any_sets) { crm_log_xml_trace(new_xml, "Expanded rsc_ticket..."); *expanded_xml = new_xml; } else { free_xml(new_xml); } return TRUE; } gboolean unpack_rsc_ticket(xmlNode * xml_obj, pe_working_set_t * data_set) { xmlNode *set = NULL; gboolean any_sets = FALSE; const char *id = crm_element_value(xml_obj, XML_ATTR_ID); const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET); const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY); ticket_t *ticket = NULL; xmlNode *orig_xml = NULL; xmlNode *expanded_xml = NULL; gboolean rc = TRUE; if (xml_obj == NULL) { crm_config_err("No rsc_ticket constraint object to process."); return FALSE; } if (id == NULL) { crm_config_err("%s constraint must have an id", crm_element_name(xml_obj)); return FALSE; } if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket); } if (ticket_str == NULL) { crm_config_err("Invalid constraint '%s': No ticket specified", id); return FALSE; } else { ticket = g_hash_table_lookup(data_set->tickets, ticket_str); } if (ticket == NULL) { ticket = ticket_new(ticket_str, data_set); if (ticket == NULL) { return FALSE; } } rc = unpack_rsc_ticket_tags(xml_obj, &expanded_xml, data_set); if (expanded_xml) { orig_xml = xml_obj; xml_obj = expanded_xml; } else if (rc == FALSE) { return FALSE; } for (set = __xml_first_child(xml_obj); set != NULL; set = __xml_next_element(set)) { if (crm_str_eq((const char *)set->name, XML_CONS_TAG_RSC_SET, TRUE)) { any_sets = TRUE; set = expand_idref(set, data_set->input); if (unpack_rsc_ticket_set(set, ticket, loss_policy, data_set) == FALSE) { return FALSE; } } } if (expanded_xml) { free_xml(expanded_xml); xml_obj = orig_xml; } if (any_sets == FALSE) { return unpack_simple_rsc_ticket(xml_obj, data_set); } return TRUE; } gboolean is_active(rsc_to_node_t * cons) { return TRUE; } diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c index 3e60dc409a..2f7b366920 100644 --- a/tools/crm_resource_ban.c +++
b/tools/crm_resource_ban.c @@ -1,280 +1,280 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <crm_resource.h> char *move_lifetime = NULL; static char * parse_cli_lifetime(const char *input) { char *later_s = NULL; crm_time_t *now = NULL; crm_time_t *later = NULL; crm_time_t *duration = NULL; if (input == NULL) { return NULL; } duration = crm_time_parse_duration(move_lifetime); if (duration == NULL) { CMD_ERR("Invalid duration specified: %s", move_lifetime); CMD_ERR("Please refer to" " http://en.wikipedia.org/wiki/ISO_8601#Durations" " for examples of valid durations"); return NULL; } now = crm_time_new(NULL); later = crm_time_add(now, duration); crm_time_log(LOG_INFO, "now ", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_INFO, "later ", later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_INFO, "duration", duration, crm_time_log_date | crm_time_log_timeofday); later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday); printf("Migration will take effect until: %s\n", later_s); crm_time_free(duration); crm_time_free(later); crm_time_free(now); return later_s; } int cli_resource_ban(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn) { char *later_s = NULL; int rc = pcmk_ok; char *id = NULL; xmlNode *fragment = NULL; xmlNode *location = NULL; if(host == NULL) { GListPtr n = allnodes; for(; n && rc == pcmk_ok; n = n->next) { node_t *target = n->data; rc = cli_resource_ban(rsc_id, target->details->uname, NULL, cib_conn); } return rc; } later_s = parse_cli_lifetime(move_lifetime); if(move_lifetime && later_s == NULL) { return -EINVAL; } fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); id = crm_strdup_printf("cli-ban-%s-on-%s", rsc_id, host); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(location, XML_ATTR_ID, id); free(id); if (BE_QUIET == FALSE) { CMD_ERR("WARNING: Creating rsc_location constraint '%s'" " with a score of -INFINITY for resource %s" " on %s.", ID(location), rsc_id, host); CMD_ERR("\tThis will prevent %s from %s" " on %s until the constraint is removed using" " the 'crm_resource --clear' command or manually" " with cibadmin", rsc_id, scope_master?"being promoted":"running", host); CMD_ERR("\tThis will be the case even if %s is" " the last node in the cluster", host); CMD_ERR("\tThis message can be disabled with --quiet"); } - crm_xml_add(location, XML_COLOC_ATTR_SOURCE, rsc_id); + crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); if(scope_master) { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S); } else { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S); } if (later_s == NULL) { /* Short form */ crm_xml_add(location, XML_CIB_TAG_NODE, host); crm_xml_add(location, XML_RULE_ATTR_SCORE,
MINUS_INFINITY_S); } else { xmlNode *rule = create_xml_node(location, XML_TAG_RULE); xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION); id = crm_strdup_printf("cli-ban-%s-on-%s-rule", rsc_id, host); crm_xml_add(rule, XML_ATTR_ID, id); free(id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, MINUS_INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); id = crm_strdup_printf("cli-ban-%s-on-%s-expr", rsc_id, host); crm_xml_add(expr, XML_ATTR_ID, id); free(id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, "#uname"); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); expr = create_xml_node(rule, "date_expression"); id = crm_strdup_printf("cli-ban-%s-on-%s-lifetime", rsc_id, host); crm_xml_add(expr, XML_ATTR_ID, id); free(id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } crm_log_xml_notice(fragment, "Modify"); rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); free_xml(fragment); free(later_s); return rc; } int cli_resource_prefer(const char *rsc_id, const char *host, cib_t * cib_conn) { char *later_s = parse_cli_lifetime(move_lifetime); int rc = pcmk_ok; char *id = NULL; xmlNode *location = NULL; xmlNode *fragment = NULL; if(move_lifetime && later_s == NULL) { return -EINVAL; } if(cib_conn == NULL) { free(later_s); return -ENOTCONN; } fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); id = crm_strdup_printf("cli-prefer-%s", rsc_id); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(location, XML_ATTR_ID, id); free(id); - crm_xml_add(location, XML_COLOC_ATTR_SOURCE, rsc_id); + crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); if(scope_master) { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S); } else { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S); } if (later_s == NULL) { /* Short form */ crm_xml_add(location, XML_CIB_TAG_NODE, host); crm_xml_add(location, XML_RULE_ATTR_SCORE, INFINITY_S); } else { xmlNode *rule = create_xml_node(location, XML_TAG_RULE); xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION); id = crm_concat("cli-prefer-rule", rsc_id, '-'); crm_xml_add(rule, XML_ATTR_ID, id); free(id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); id = crm_concat("cli-prefer-expr", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); free(id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, "#uname"); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); expr = create_xml_node(rule, "date_expression"); id = crm_concat("cli-prefer-lifetime-end", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); free(id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } crm_log_xml_info(fragment, "Modify"); rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); free_xml(fragment); free(later_s); return rc; } int cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn) { char *id = NULL; int rc = pcmk_ok; xmlNode *fragment = NULL; xmlNode *location = NULL; if(cib_conn == NULL) { return -ENOTCONN; } fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); if(host) { id = crm_strdup_printf("cli-ban-%s-on-%s", rsc_id, host); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(location, XML_ATTR_ID, id); free(id); } else { 
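/* No node was specified: remove the "cli-ban-<rsc>-on-<node>" constraint
 * for every known node, then fall through to remove the "cli-prefer" one
 * as well. */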
GListPtr n = allnodes; for(; n; n = n->next) { node_t *target = n->data; id = crm_strdup_printf("cli-ban-%s-on-%s", rsc_id, target->details->uname); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(location, XML_ATTR_ID, id); free(id); } } id = crm_strdup_printf("cli-prefer-%s", rsc_id); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(location, XML_ATTR_ID, id); if(host && do_force == FALSE) { crm_xml_add(location, XML_CIB_TAG_NODE, host); } free(id); crm_log_xml_info(fragment, "Delete"); rc = cib_conn->cmds->delete(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); if (rc == -ENXIO) { rc = pcmk_ok; } else if (rc != pcmk_ok) { goto bail; } bail: free_xml(fragment); return rc; }
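/* For reference (resource and node names hypothetical, assuming the usual
 * CIB attribute names behind the XML_* constants): the short-form ban
 * created by cli_resource_ban() above is a single location constraint
 * along the lines of
 *
 *   <rsc_location id="cli-ban-myrsc-on-node1" rsc="myrsc" role="Started"
 *                 node="node1" score="-INFINITY"/>
 *
 * while the lifetime form instead expresses the score through a <rule>
 * containing a date_expression with operation="lt" and "end" set to the
 * computed expiry time. */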