diff --git a/configure.ac b/configure.ac index 8ef503c258..ee98f9bf3d 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2145 +1,2149 @@ dnl dnl autoconf for Pacemaker dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.59) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) dnl Workaround autoconf < 2.64 if test x"${PACKAGE_URL}" = x""; then AC_SUBST([PACKAGE_URL], PCMK_URL) fi PCMK_FEATURES="" HB_PKG=heartbeat AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/crm_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) dnl Older distros may need: AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION) AM_INIT_AUTOMAKE([foreign]) AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from dnl normal compilation. When a failure occurs, it will then display the full dnl command line dnl Wrap in m4_ifdef to avoid breaking on older platforms m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd BUILD_ATOMIC_ATTRD=1 dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". 
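dnl A minimal usage sketch tying the overrides above together (illustrative
dnl command line only; the version and package name values are hypothetical):
dnl   ./configure CC=clang --with-version=1.1.16.custom --with-pkg-name=pacemaker-custom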
AM_PROG_CC_C_O AC_PROG_CC_STDC gl_EARLY gl_INIT LT_INIT([dlopen]) LTDL_INIT([convenience]) AC_PROG_YACC AM_PROG_LEX AC_C_STRINGIZE AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)]) return $RC } try_extract_header_define() { AC_MSG_CHECKING(if $2 in $1 exists) Cfile=$srcdir/extract_define.$2.${$} printf "#include <stdio.h>\n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) {\n" >> ${Cfile}.c printf "#ifdef %s\n" $2 >> ${Cfile}.c printf "printf(\"%%s\", %s);\n" $2 >> ${Cfile}.c printf "#endif \n return 0; }\n" >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} 2>/dev/null value= if test -x ${Cfile}; then value=`${Cfile} 2>/dev/null` fi if test x"${value}" == x""; then value=$3 AC_MSG_RESULT(default: $value) else AC_MSG_RESULT($value) fi printf $value rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include <stdio.h>\n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno } dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris, require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to the ANSI/ISO standard for older compilers. [default=no]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) AC_ARG_ENABLE([quiet], [ --enable-quiet Suppress make output unless there is an error [default=no]]) AC_ARG_ENABLE([no-stack], [ --enable-no-stack Only build the Policy Engine and pieces needed to support it [default=no]]) AC_ARG_ENABLE([upstart], [ --enable-upstart Enable support for managing resources via Upstart [default=try]], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [ --enable-systemd Enable support for managing resources via systemd [default=try]], [], [enable_systemd=try], ) AC_ARG_ENABLE(hardening, [ --enable-hardening Harden the resulting executables/libraries (best effort by default)], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) AC_ARG_WITH(ais, [ --with-ais Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH(corosync, [ --with-corosync Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ] dnl initialized in AC_ARG_WITH(ais...) 
already, dnl don't reset to try if it was given as --without-ais ) AC_ARG_WITH(heartbeat, [ --with-heartbeat Support the Heartbeat messaging and membership layer ], [ SUPPORT_HEARTBEAT=$withval ], [ SUPPORT_HEARTBEAT=try ], ) AC_ARG_WITH(cman, [ --with-cman Support the consumption of membership and quorum from cman ], [ SUPPORT_CMAN=$withval ], [ SUPPORT_CMAN=try ], ) AC_ARG_WITH(cpg, [ --with-cs-quorum Support the consumption of membership and quorum from corosync ], [ SUPPORT_CS_QUORUM=$withval ], [ SUPPORT_CS_QUORUM=try ], ) AC_ARG_WITH(nagios, [ --with-nagios Support nagios remote monitoring ], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) AC_ARG_WITH(nagios-plugin-dir, [ --with-nagios-plugin-dir=DIR Directory for nagios plugins [${NAGIOS_PLUGIN_DIR}]], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH(nagios-metadata-dir, [ --with-nagios-metadata-dir=DIR Directory for nagios plugins metadata [${NAGIOS_METADATA_DIR}]], [ NAGIOS_METADATA_DIR="$withval" ] ) AC_ARG_WITH(snmp, [ --with-snmp Support the SNMP protocol ], [ SUPPORT_SNMP=$withval ], [ SUPPORT_SNMP=try ], ) AC_ARG_WITH(esmtp, [ --with-esmtp Support the sending mail notifications with the esmtp library ], [ SUPPORT_ESMTP=$withval ], [ SUPPORT_ESMTP=try ], ) AC_ARG_WITH(acl, [ --with-acl Support CIB ACL ], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) AC_ARG_WITH(cibsecrets, [ --with-cibsecrets Support CIB secrets ], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) CSPREFIX="" AC_ARG_WITH(ais-prefix, [ --with-ais-prefix=DIR Prefix used when Corosync was installed [$prefix]], [ CSPREFIX=$withval ], [ CSPREFIX=$prefix ]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) SUPPORT_PROFILING=0 AC_ARG_WITH(profiling, [ --with-profiling Disable optimizations for effective profiling ], [ SUPPORT_PROFILING=$withval ]) AC_ARG_WITH(coverage, [ --with-coverage Disable optimizations for effective profiling ], [ SUPPORT_COVERAGE=$withval ]) PUBLICAN_BRAND="common" AC_ARG_WITH(brand, [ --with-brand=brand Brand to use for generated documentation (set empty for no docs) [$PUBLICAN_BRAND]], [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ]) AC_SUBST(PUBLICAN_BRAND) ASCIIDOC_CLI_TYPE="pcs" AC_ARG_WITH(doc-cli, [ --with-doc-cli=cli_type CLI type to use for generated documentation. 
[$ASCIIDOC_CLI_TYPE]], [ ASCIIDOC_CLI_TYPE="$withval" ]) AC_SUBST(ASCIIDOC_CLI_TYPE) CONFIGDIR="" AC_ARG_WITH(configdir, [ --with-configdir=DIR Directory for Pacemaker configuration file [${CONFIGDIR}]], [ CONFIGDIR="$withval" ] ) dnl =============================================== dnl General Processing dnl =============================================== AC_SUBST(HB_PKG) INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing ais_prefix: ${CSPREFIX}) case $CSPREFIX in dnl For consistency with Heartbeat, map NONE->$prefix NONE) CSPREFIX=$prefix;; prefix) CSPREFIX=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} #docdir=${datadir}/doc/packages/${PACKAGE} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
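dnl As a sketch of the feature-based style preferred above (illustrative only,
dnl not wired into this script), one would test for the capability itself
dnl rather than matching the OS name, e.g.:
dnl   AC_CHECK_HEADERS([sys/signalfd.h])
dnl   AC_SEARCH_LIBS([clock_gettime], [rt])
dnl and then branch on the resulting HAVE_* / ac_cv_* results instead of on
dnl $host_os, as the case statement below does.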
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac dnl Eventually remove this if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) ac_save_CFLAGS=$CFLAGS CFLAGS="-Wall -Werror" AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [ #include #include #include ], [ int max = 512; uint64_t bignum = 42; char *buffer = malloc(max); const char *random = "random"; snprintf(buffer, max-1, "", bignum, random); fprintf(stderr, "Result: %s\n", buffer); ] )], [U64T="%lu"], [U64T="%llu"] ) CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) AM_PATH_PYTHON AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(HTML2TXT, lynx w3m) AC_PATH_PROGS(HELP2MAN, help2man) AC_PATH_PROGS(POD2MAN, pod2man, pod2man) AC_PATH_PROGS(ASCIIDOC, asciidoc) AC_PATH_PROGS(PUBLICAN, publican) AC_PATH_PROGS(INKSCAPE, inkscape) AC_PATH_PROGS(XSLTPROC, xsltproc) AC_PATH_PROGS(XMLCATALOG, xmlcatalog) AC_PATH_PROGS(FOP, fop) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PKGCONFIG, pkg-config) AC_PATH_PROGS(XML2CONFIG, xml2-config) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) dnl Disable these until we decide if the stonith config file should be supported dnl AC_PATH_PROGS(BISON, bison) dnl AC_PATH_PROGS(FLEX, flex) dnl AC_PATH_PROGS(HAVE_YACC, $YACC) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) 
for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"") if test x"${ASCIIDOC}" != x""; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi SUPPORT_STONITH_CONFIG=0 if test x"${HAVE_YACC}" != x"" -a x"${FLEX}" != x"" -a x"${BISON}" != x""; then SUPPORT_STONITH_CONFIG=1 PCMK_FEATURES="$PCMK_FEATURES st-conf" fi AM_CONDITIONAL(BUILD_STONITH_CONFIG, test $SUPPORT_STONITH_CONFIG = 1) AC_DEFINE_UNQUOTED(SUPPORT_STONITH_CONFIG, $SUPPORT_STONITH_CONFIG, Support a stand-alone stonith config file in addition to the CIB) publican_intree_brand=no if test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""; then dnl special handling for clusterlabs brand (possibly in-tree version used) test "${PUBLICAN_BRAND}" != "clusterlabs" \ || test -d /usr/share/publican/Common_Content/clusterlabs if test $? -ne 0; then dnl Unknown option: brand_dir vs. Option brand_dir requires an argument if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common]) PUBLICAN_BRAND=common else publican_intree_brand=yes fi fi AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand]) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi AM_CONDITIONAL([BUILD_DOCBOOK], [test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""]) AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], [test x"${publican_intree_brand}" = x"yes"]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD, which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...). Fortunately, dnl a replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... 
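dnl The dlopen probes here (libc above, libdl next) feed the LIBADD_DL
dnl selection further below; a rough sketch of the intended outcome, assuming
dnl a typical toolchain:
dnl   dlopen found in libc  -> LIBADD_DL=""
dnl   dlopen found in libdl -> LIBADD_DL="-ldl"
dnl   neither               -> fall back to libtool's ${lt_cv_dlopen_libs}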
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_FUNCS([sched_setscheduler]) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi if test x"${PKGCONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi if $PKGCONFIG --exists glib-2.0 then GLIBCONFIG="$PKGCONFIG glib-2.0" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKGCONFIG --exists glib-2.0; echo $? $PKGCONFIG --cflags glib-2.0; echo $? $PKGCONFIG glib-2.0; echo $? set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl FreeBSD needs -lcompat for ftime() used by lrmd.c AC_CHECK_LIB([compat], [ftime], [COMPAT_LIBS='-lcompat']) AC_SUBST(COMPAT_LIBS) dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(asm/types.h) AC_CHECK_HEADERS(assert.h) AC_CHECK_HEADERS(auth-client.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(fcntl.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/errqueue.h) AC_CHECK_HEADERS(linux/swab.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pam/pam_appl.h) AC_CHECK_HEADERS(pthread.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(security/pam_appl.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/poll.h) AC_CHECK_HEADERS(sys/reboot.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/select.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/signalfd.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/timeb.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/uio.h) AC_CHECK_HEADERS(sys/un.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) AC_CHECK_HEADERS(winsock.h) dnl These headers need prerequisits before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) dnl AC_CHECK_HEADERS(netinet/icmp6.h) dnl AC_CHECK_HEADERS(netinet/ip6.h) dnl AC_CHECK_HEADERS(netinet/ip_icmp.h) AC_MSG_CHECKING(for special libxml2 includes) if test "x$XML2CONFIG" = "x"; then 
AC_MSG_ERROR(libxml2 config not found) else XML2HEAD="`$XML2CONFIG --cflags`" AC_MSG_RESULT($XML2HEAD) AC_CHECK_LIB(xml2, xmlReadMemory) AC_CHECK_LIB(xslt, xsltApplyStylesheet) fi CPPFLAGS="$CPPFLAGS $XML2HEAD" AC_CHECK_HEADERS(libxml/xpath.h) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(The libxml development headers were not found) fi if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(The libxslt development headers were not found) fi dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]]) AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include <lrm/lrm_api.h>]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include <dirent.h>]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(g_log_set_default_handler) AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_TRY_COMPILE([#include <signal.h>],[sighandler_t *f;], has_sighandler_t=yes,has_sighandler_t=no) AC_MSG_RESULT($has_sighandler_t) if test "$has_sighandler_t" = "yes" ; then AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurses takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) CURSESLIBS=`$PKGCONFIG --libs ncurses` || CURSESLIBS='-lncurses' fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) CURSESLIBS=`$PKGCONFIG --libs ncurses` || CURSESLIBS='-lncurses' fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" ac_save_CFLAGS=$CFLAGS CFLAGS="-Wcast-qual -Werror" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM( [ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [ac_cv_compatible_printw=yes], [ac_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT([$ac_cv_compatible_printw]) if test "$ac_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage dnl Enable gprof #LIBS="$LIBS -pg" #CFLAGS="$CFLAGS -pg" ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - Heartbeat / LibQB dnl ======================================================================== dnl Compatibility checks AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include ]]) if test x${enable_no_stack} = xyes; then SUPPORT_HEARTBEAT=no SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" 
LIBS="$libqb_LIBS $LIBS" AC_CHECK_HEADERS(qb/qbipc_common.h) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) AC_CHECK_HEADERS(heartbeat/hb_config.h) AC_CHECK_HEADERS(heartbeat/glue_config.h) AC_CHECK_HEADERS(stonith/stonith.h) AC_CHECK_HEADERS(agent_config.h) GLUE_HEADER=none HAVE_GLUE=0 if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then GLUE_HEADER=glue_config.h HAVE_GLUE=1 elif test "$ac_cv_header_heartbeat_hb_config_h" = "yes"; then GLUE_HEADER=hb_config.h HAVE_GLUE=1 else AC_MSG_WARN(cluster-glue development headers were not found) fi if test "$ac_cv_header_stonith_stonith_h" = "yes"; then PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi if test $HAVE_GLUE = 1; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) fi dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_DTD_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_DIRECTORY) CRM_CORE_DIR=`try_extract_header_define $GLUE_HEADER HA_COREDIR ${localstatedir}/lib/pacemaker/cores` AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) CRM_DAEMON_USER=`try_extract_header_define $GLUE_HEADER HA_CCMUSER hacluster` AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) CRM_DAEMON_GROUP=`try_extract_header_define $GLUE_HEADER HA_APIGROUP haclient` AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_STATE_DIR=${localstatedir}/run/crm AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets) AC_SUBST(CRM_STATE_DIR) +CRM_LOG_DIR="${localstatedir}/log/pacemaker" +AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Where Pacemaker can store log files) +AC_SUBST(CRM_LOG_DIR) + CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_LEGACY_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm" AC_DEFINE_UNQUOTED(CRM_LEGACY_CONFIG_DIR,"$CRM_LEGACY_CONFIG_DIR", Where Pacemaker used to keep configuration files) AC_SUBST(CRM_LEGACY_CONFIG_DIR) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) 
AC_SUBST(CRM_DAEMON_DIR) HB_DAEMON_DIR=`try_extract_header_define $GLUE_HEADER HA_LIBHBDIR $libdir/heartbeat` AC_DEFINE_UNQUOTED(HB_DAEMON_DIR,"$HB_DAEMON_DIR", Location Heartbeat expects Pacemaker daemons to be in) AC_SUBST(HB_DAEMON_DIR) dnl Needed so that the Corosync plugin can clear out the directory as Heartbeat does HA_STATE_DIR=`try_extract_header_define $GLUE_HEADER HA_VARRUNDIR ${localstatedir}/run` AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets) AC_SUBST(HA_STATE_DIR) CRM_RSCTMP_DIR=`try_extract_header_define agent_config.h HA_RSCTMPDIR $HA_STATE_DIR/resource-agents` AC_MSG_CHECKING(Scratch dir for resource agents) AC_MSG_RESULT($CRM_RSCTMP_DIR) AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) dnl Needed for the location of hostcache in CTS.py HA_VARLIBHBDIR=`try_extract_header_define $GLUE_HEADER HA_VARLIBHBDIR ${localstatedir}/lib/heartbeat` AC_SUBST(HA_VARLIBHBDIR) AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file) OCF_ROOT_DIR=`try_extract_header_define $GLUE_HEADER OCF_ROOT_DIR /usr/lib/ocf` if test "X$OCF_ROOT_DIR" = X; then AC_MSG_ERROR(Could not locate OCF directory) fi AC_SUBST(OCF_ROOT_DIR) OCF_RA_DIR=`try_extract_header_define $GLUE_HEADER OCF_RA_DIR $OCF_ROOT_DIR/resource.d` AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) RH_STONITH_DIR="$sbindir" AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries) RH_STONITH_PREFIX="fence_" AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name makes a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 HAVE_upstart=0 HAVE_systemd=0 PKG_CHECK_MODULES(DBUS, dbus-1, ,HAVE_dbus=0) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) if test $HAVE_dbus = 1; then CFLAGS="$CFLAGS `$PKGCONFIG --cflags dbus-1`" fi DBUS_LIBS="$CFLAGS `$PKGCONFIG --libs dbus-1`" AC_SUBST(DBUS_LIBS) AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]]) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without DBus]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then AC_MSG_CHECKING([for systemd version query result via dbus-send]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if 
test "x${ret}" != xborked \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) if test "x${enable_systemd}" = xyes; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" AC_MSG_CHECKING([for systemd path for system unit files]) systemdunitdir="${systemdunitdir-}" PKG_CHECK_VAR([systemdunitdir], [systemd], [systemdsystemunitdir], [],[ systemdunitdir=no ]) AC_MSG_RESULT([${systemdunitdir}]) if test "x${systemdunitdir}" = xno; then AC_MSG_FAILURE([cannot enable systemd when systemdunitdir unresolved]) fi fi AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then AC_MSG_FAILURE([cannot enable Upstart without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then AC_MSG_CHECKING([for Upstart version query result via dbus-send]) ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) AC_SUBST(SUPPORT_UPSTART) case $SUPPORT_NAGIOS in 1|yes|true|try) SUPPORT_NAGIOS=1;; *) SUPPORT_NAGIOS=0;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" dnl ======================================================================== dnl Cluster stack - Heartbeat dnl ======================================================================== case $SUPPORT_HEARTBEAT in 1|yes|true|try) AC_MSG_CHECKING(for heartbeat support) AC_CHECK_LIB(hbclient, ll_cluster_new, [SUPPORT_HEARTBEAT=1], [if test $SUPPORT_HEARTBEAT != try; then AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found) fi]) if test $SUPPORT_HEARTBEAT = 1 ; then STACKS="$STACKS heartbeat" dnl objdump -x ${libdir}/libccmclient.so | grep SONAME | awk '{print $2}' AC_DEFINE_UNQUOTED(CCM_LIBRARY, "libccmclient.so.1", Library to load for ccm support) 
AC_DEFINE_UNQUOTED(HEARTBEAT_LIBRARY, "libhbclient.so.1", Library to load for heartbeat support) BUILD_ATOMIC_ATTRD=0 else SUPPORT_HEARTBEAT=0 fi ;; *) SUPPORT_HEARTBEAT=0;; esac AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1) AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer) AC_SUBST(SUPPORT_HEARTBEAT) dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_CS=no;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" CS_USES_LIBQB=0 PCMK_SERVICE_ID=9 if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS, with '$CSPREFIX') SUPPORT_CS=1 PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap, HAVE_cmap=1, HAVE_cmap=0) PKG_CHECK_MODULES(cman, libcman, HAVE_cman=1, HAVE_cman=0) PKG_CHECK_MODULES(confdb, libconfdb, HAVE_confdb=1, HAVE_confdb=0) PKG_CHECK_MODULES(fenced, libfenced, HAVE_fenced=1, HAVE_fenced=0) PKG_CHECK_MODULES(quorum, libquorum, HAVE_quorum=1, HAVE_quorum=0) PKG_CHECK_MODULES(oldipc, libcoroipcc, HAVE_oldipc=1, HAVE_oldipc=0) if test $HAVE_oldipc = 1; then CFLAGS="$CFLAGS $oldipc_FLAGS $cpg_FLAGS $cfg_FLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $oldipc_LIBS $cpg_LIBS $cfg_LIBS" else CS_USES_LIBQB=1 CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS" AC_CHECK_LIB(corosync_common, cs_strerror) fi AC_DEFINE_UNQUOTED(HAVE_CONFDB, $HAVE_confdb, Have the old herarchial Corosync config API) AC_DEFINE_UNQUOTED(HAVE_CMAP, $HAVE_cmap, Have the new non-herarchial Corosync config API) fi if test $SUPPORT_CS = 1 -a x$HAVE_oldipc = x0 ; then dnl Support for plugins was removed about the time the IPC was dnl moved to libqb. 
dnl The only option now is the built-in quorum API CFLAGS="$CFLAGS $cmap_CFLAGS $quorum_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $cmap_LIBS $quorum_LIBS" STACKS="$STACKS corosync-native" AC_DEFINE_UNQUOTED(SUPPORT_CS_QUORUM, 1, Support the consumption of membership and quorum from corosync) fi SUPPORT_PLUGIN=0 if test $SUPPORT_CS = 1 -a x$HAVE_confdb = x1; then dnl Need confdb to support cman and the plugins SUPPORT_PLUGIN=1 BUILD_ATOMIC_ATTRD=0 AC_MSG_CHECKING([for corosync path for plugins]) PKG_CHECK_VAR([LCRSODIR], [corosync], [lcrsodir], [ AC_MSG_RESULT([$LCRSODIR]) ],[ AC_SUBST([LCRSODIR], [$libdir]) AC_MSG_RESULT([$LCRSODIR (fallback)]) ]) STACKS="$STACKS corosync-plugin" COROSYNC_LIBS="$COROSYNC_LIBS $confdb_LIBS" if test $SUPPORT_CMAN != no; then if test $HAVE_cman = 1 -a $HAVE_fenced = 1; then SUPPORT_CMAN=1 STACKS="$STACKS cman" CFLAGS="$CFLAGS $cman_FLAGS $fenced_FLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $cman_LIBS $fenced_LIBS" fi fi fi dnl Normalize SUPPORT_CS and SUPPORT_CMAN for use with #if directives if test $SUPPORT_CMAN != 1; then SUPPORT_CMAN=0 fi if test $SUPPORT_CS = 1; then CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" elif test $SUPPORT_CS != 0; then SUPPORT_CS=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support Corosync: $aisreason) else AC_MSG_FAILURE(Unable to support Corosync: $aisreason) fi fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AC_DEFINE_UNQUOTED(SUPPORT_CMAN, $SUPPORT_CMAN, Support the consumption of membership and quorum from cman) AC_DEFINE_UNQUOTED(CS_USES_LIBQB, $CS_USES_LIBQB, Does corosync use libqb for its ipc) AC_DEFINE_UNQUOTED(PCMK_SERVICE_ID, $PCMK_SERVICE_ID, Corosync service number) AC_DEFINE_UNQUOTED(SUPPORT_PLUGIN, $SUPPORT_PLUGIN, Support the Pacemaker plugin for Corosync) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AM_CONDITIONAL(BUILD_CS_PLUGIN, test $SUPPORT_PLUGIN = 1) AM_CONDITIONAL(BUILD_CMAN, test $SUPPORT_CMAN = 1) AM_CONDITIONAL(BUILD_ATOMIC_ATTRD, test $BUILD_ATOMIC_ATTRD = 1) AC_DEFINE_UNQUOTED(HAVE_ATOMIC_ATTRD, $BUILD_ATOMIC_ATTRD, Support the new atomic attrd) AC_SUBST(SUPPORT_CMAN) AC_SUBST(SUPPORT_CS) AC_SUBST(SUPPORT_PLUGIN) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported. Just building the Policy Engine) PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack (heartbeat or corosync) ) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi if test ${BUILD_ATOMIC_ATTRD} = 1; then PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" fi AC_SUBST(CLUSTERLIBS) dnl ======================================================================== dnl SNMP dnl ======================================================================== case $SUPPORT_SNMP in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_SNMP=no;; esac SNMPLIBS="" AC_MSG_CHECKING(for snmp support) if test $SUPPORT_SNMP = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_SNMP=0 else SNMPCONFIG="" AC_MSG_RESULT($SUPPORT_SNMP) AC_CHECK_HEADERS(net-snmp/net-snmp-config.h) if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then SUPPORT_SNMP="no" fi if test $SUPPORT_SNMP != no; then AC_PATH_PROGS(SNMPCONFIG, net-snmp-config) if test "X${SNMPCONFIG}" = "X"; then AC_MSG_RESULT(You need the net_snmp development package to continue.) 
SUPPORT_SNMP=no fi fi if test $SUPPORT_SNMP != no; then AC_MSG_CHECKING(for special snmp libraries) SNMPLIBS=`$SNMPCONFIG --agent-libs` AC_MSG_RESULT($SNMPLIBS) fi if test $SUPPORT_SNMP != no; then savedLibs=$LIBS LIBS="$LIBS $SNMPLIBS" dnl On many systems libcrypto is needed when linking against libsnmp. dnl Check to see if it exists, and if so use it. dnl AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",) dnl AC_SUBST(CRYPTOLIB) AC_CHECK_FUNCS(netsnmp_transport_open_client) if test $ac_cv_func_netsnmp_transport_open_client != yes; then AC_CHECK_FUNCS(netsnmp_tdomain_transport) if test $ac_cv_func_netsnmp_tdomain_transport != yes; then SUPPORT_SNMP=no else AC_DEFINE_UNQUOTED(NETSNMPV53, 1, [Use the older 5.3 version of the net-snmp API]) fi fi LIBS=$savedLibs fi if test $SUPPORT_SNMP = no; then SNMPLIBS="" SUPPORT_SNMP=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support SNMP) else AC_MSG_FAILURE(Unable to support SNMP) fi else SUPPORT_SNMP=1 fi fi if test $SUPPORT_SNMP = 1; then PCMK_FEATURES="$PCMK_FEATURES snmp" fi AC_SUBST(SNMPLIBS) AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1") AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps) dnl ======================================================================== dnl ESMTP dnl ======================================================================== case $SUPPORT_ESMTP in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_ESMTP=no;; esac ESMTPLIB="" AC_MSG_CHECKING(for esmtp support) if test $SUPPORT_ESMTP = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ESMTP=0 else ESMTPCONFIG="" AC_MSG_RESULT($SUPPORT_ESMTP) AC_CHECK_HEADERS(libesmtp.h) if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then ENABLE_ESMTP="no" fi if test $SUPPORT_ESMTP != no; then AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config) if test "X${ESMTPCONFIG}" = "X"; then AC_MSG_RESULT(You need the libesmtp development package to continue.) SUPPORT_ESMTP=no fi fi if test $SUPPORT_ESMTP != no; then AC_MSG_CHECKING(for special esmtp libraries) ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '` AC_MSG_RESULT($ESMTPLIBS) fi if test $SUPPORT_ESMTP = no; then SUPPORT_ESMTP=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ESMTP) else AC_MSG_FAILURE(Unable to support ESMTP) fi else SUPPORT_ESMTP=1 PCMK_FEATURES="$PCMK_FEATURES libesmtp" fi fi AC_SUBST(ESMTPLIBS) AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1") AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_ACL=no;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) SUPPORT_ACL=1 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. 
You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1;; *) SUPPORT_CIBSECRETS=0;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) if test $SUPPORT_CIBSECRETS = 1; then PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) LRM_LEGACY_CIBSECRETS_DIR="${localstatedir}/lib/heartbeat/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_LEGACY_CIBSECRETS_DIR,"$LRM_LEGACY_CIBSECRETS_DIR", Legacy location for CIB secrets) AC_SUBST(LRM_LEGACY_CIBSECRETS_DIR) fi dnl ======================================================================== dnl GnuTLS dnl ======================================================================== AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program. dnl If no 'libgnutls-config', try traditional autoconf means. AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config) if test -n "$LIBGNUTLS_CONFIG"; then AC_MSG_CHECKING(for gnutls header flags) GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`"; AC_MSG_RESULT($GNUTLSHEAD) AC_MSG_CHECKING(for gnutls library flags) GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`"; AC_MSG_RESULT($GNUTLSLIBS) fi AC_CHECK_LIB(gnutls, gnutls_init) AC_CHECK_FUNCS(gnutls_priority_set_direct) AC_SUBST(GNUTLSHEAD) AC_SUBST(GNUTLSLIBS) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKGCONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIMPI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKGCONFIG --exists $OPENIPMI $SERVICELOG then PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) OPENIPMI_SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. 
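dnl To illustrate the point above (hypothetical shell session, not executed):
dnl   export CFLAGS="-O3"
dnl   ./configure && make
dnl Without the reset performed by the test below, the warning and -Werror
dnl flags appended later in this script would leak into every sub-process
dnl through the exported CFLAGS; the test unsets and reassigns the variable
dnl so it loses its export attribute.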
if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) CC_EXTRAS="" if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" enable_fatal_warnings=no else CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no SAVED_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -Werror" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) CFLAGS="$SAVED_CFLAGS" if cc_supports_flag "-Wformat-nonliteral"; then gcc_format_nonliteral=yes else gcc_format_nonliteral=no fi # We had to eliminate -Wnested-externs because of libtool changes EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char" if test "x$gcc_diagnostic_push_pull" = "xyes"; then AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" else if test "x$gcc_format_nonliteral" = "xyes"; then EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" fi fi # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'` AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4) dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl 
CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (attrd, cib, crmd, lrmd, stonithd, pacemakerd, pacemaker_remoted, dnl pengine) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi if test "x${HARDENING}" = "xno"; then AC_MSG_NOTICE([Hardening: explicitly disabled]) elif test "x${HARDENING}" = "xyes" \ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then dnl We'll figure out on our own... CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1] ) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1] ) fi # daemons incl. 
libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1] ) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"] ) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE( [Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... 
AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--quiet" QUIET_MAKE_OPTS="--quiet" fi AC_MSG_RESULT(Supress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKE="${MAKE} \$(QUIET_MAKE_OPTS)" AC_SUBST(CC) AC_SUBST(MAKE) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_MAKE_OPTS) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ Doxyfile \ coverage.sh \ cts/Makefile \ cts/CTSvars.py \ cts/LSBDummy \ cts/HBDummy \ cts/benchmark/Makefile \ cts/benchmark/clubench \ cts/lxc_autogen.sh \ cib/Makefile \ attrd/Makefile \ crmd/Makefile \ pengine/Makefile \ pengine/regression.core.sh \ doc/Makefile \ doc/Clusters_from_Scratch/publican.cfg \ doc/Pacemaker_Development/publican.cfg \ doc/Pacemaker_Explained/publican.cfg \ doc/Pacemaker_Remote/publican.cfg \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ replace/Makefile \ lib/Makefile \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pengine.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/ais/Makefile \ lib/common/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/pengine/Makefile \ lib/transition/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ mcp/Makefile \ mcp/pacemaker \ mcp/pacemaker.service \ mcp/pacemaker.upstart \ mcp/pacemaker.combined.upstart \ fencing/Makefile \ fencing/regression.py \ lrmd/Makefile \ lrmd/regression.py \ lrmd/pacemaker_remote.service \ lrmd/pacemaker_remote \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ extra/logrotate/pacemaker \ tools/Makefile \ tools/crm_report \ tools/report.common \ tools/cibsecret \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ lib/gnu/Makefile \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) if test $SUPPORT_PLUGIN = 1; then AC_MSG_RESULT([ Corosync Plugins = ${LCRSODIR}]) fi AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff 
--git a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt index 3418881fe1..226357aa3e 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt @@ -1,1355 +1,1371 @@ = Advanced Resource Types = [[group-resources]] == Groups - A Syntactic Shortcut == indexterm:[Group Resources] indexterm:[Resource,Groups] One of the most common elements of a cluster is a set of resources that need to be located together, start sequentially, and stop in the reverse order. To simplify this configuration, we support the concept of groups. .A group of two primitive resources ====== [source,XML] ------- ------- ====== Although the example above contains only two resources, there is no limit to the number of resources a group can contain. The example is also sufficient to explain the fundamental properties of a group: * Resources are started in the order they appear in (+Public-IP+ first, then +Email+) * Resources are stopped in the reverse order to which they appear in (+Email+ first, then +Public-IP+) If a resource in the group can't run anywhere, then nothing after that is allowed to run, too. * If +Public-IP+ can't run anywhere, neither can +Email+; * but if +Email+ can't run anywhere, this does not affect +Public-IP+ in any way The group above is logically equivalent to writing: .How the cluster sees a group resource ====== [source,XML] ------- ------- ====== Obviously as the group grows bigger, the reduced configuration effort can become significant. Another (typical) example of a group is a DRBD volume, the filesystem mount, an IP address, and an application that uses them. === Group Properties === .Properties of a Group Resource [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the group indexterm:[id,Group Resource Property] indexterm:[Resource,Group Property,id] |========================================================= === Group Options === Groups inherit the +priority+, +target-role+, and +is-managed+ properties from primitive resources. See <> for information about those properties. === Group Instance Attributes === Groups have no instance attributes. However, any that are set for the group object will be inherited by the group's children. === Group Contents === Groups may only contain a collection of cluster resources (see <>). To refer to a child of a group resource, just use the child's +id+ instead of the group's. === Group Constraints === Although it is possible to reference a group's children in constraints, it is usually preferable to reference the group itself. .Some constraints involving groups ====== [source,XML] ------- ------- ====== === Group Stickiness === indexterm:[resource-stickiness,Groups] Stickiness, the measure of how much a resource wants to stay where it is, is additive in groups. Every active resource of the group will contribute its stickiness value to the group's total. So if the default +resource-stickiness+ is 100, and a group has seven members, five of which are active, then the group as a whole will prefer its current location with a score of 500. 
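To make the start/stop ordering and dependency rules above concrete, here is a minimal sketch of the two-member group discussed in this section. The resource ids +Public-IP+ and +Email+ come from the text above; the agent types, id suffixes and IP address are illustrative assumptions.

.A minimal two-member group (sketch)
======
[source,XML]
-------
<group id="shortcut">
  <!-- Started first, stopped last -->
  <primitive id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr2">
    <instance_attributes id="Public-IP-params">
      <!-- illustrative address -->
      <nvpair id="Public-IP-ip" name="ip" value="192.0.2.10"/>
    </instance_attributes>
  </primitive>
  <!-- Started second, stopped first; cannot run if Public-IP cannot run -->
  <primitive id="Email" class="lsb" type="exim"/>
</group>
-------
======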
[[s-resource-clone]] == Clones - Resources That Get Active on Multiple Hosts == indexterm:[Clone Resources] indexterm:[Resource,Clones] Clones were initially conceived as a convenient way to start multiple instances of an IP address resource and have them distributed throughout the cluster for load balancing. They have turned out to be quite useful for a number of purposes including integrating with the Distributed Lock Manager (used by many cluster filesystems), the fencing subsystem, and OCFS2. You can clone any resource, provided the resource agent supports it. Three types of cloned resources exist: * Anonymous * Globally unique * Stateful 'Anonymous' clones are the simplest. These behave completely identically everywhere they are running. Because of this, there can be only one copy of an anonymous clone active per machine. 'Globally unique' clones are distinct entities. A copy of the clone running on one machine is not equivalent to another instance on another node, nor would any two copies on the same node be equivalent. 'Stateful' clones are covered later in <>. .A clone of an LSB resource ====== [source,XML] ------- ------- ====== === Clone Properties === .Properties of a Clone Resource [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the clone indexterm:[id,Clone Property] indexterm:[Clone,Property,id] |========================================================= === Clone Options === Options inherited from <> resources: +priority, target-role, is-managed+ .Clone-specific configuration options [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |clone-max |number of nodes in cluster |How many copies of the resource to start indexterm:[clone-max,Clone Option] indexterm:[Clone,Option,clone-max] |clone-node-max |1 |How many copies of the resource can be started on a single node indexterm:[clone-node-max,Clone Option] indexterm:[Clone,Option,clone-node-max] |clone-min |1 |Require at least this number of clone instances to be runnable before allowing resources depending on the clone to be runnable '(since 1.1.14)' indexterm:[clone-min,Clone Option] indexterm:[Clone,Option,clone-min] |notify |true |When stopping or starting a copy of the clone, tell all the other copies beforehand and again when the action was successful. Allowed values: +false+, +true+ indexterm:[notify,Clone Option] indexterm:[Clone,Option,notify] |globally-unique |false |Does each copy of the clone perform a different function? Allowed values: +false+, +true+ indexterm:[globally-unique,Clone Option] indexterm:[Clone,Option,globally-unique] |ordered |false |Should the copies be started in series (instead of in parallel)? Allowed values: +false+, +true+ indexterm:[ordered,Clone Option] indexterm:[Clone,Option,ordered] |interleave |false |If this clone depends on another clone via an ordering constraint, is it allowed to start after the local instance of the other clone starts, rather than wait for all instances of the other clone to start? Allowed values: +false+, +true+ indexterm:[interleave,Clone Option] indexterm:[Clone,Option,interleave] |========================================================= === Clone Instance Attributes === Clones have no instance attributes; however, any that are set here will be inherited by the clone's children.
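A minimal sketch of an anonymous clone is shown below; it wraps a single primitive (as required by the next section) and sets +globally-unique+ to its default of +false+ explicitly. The +apache+ ids and the LSB agent are illustrative, echoing the "clone of an LSB resource" example named above.

.An anonymous clone of an LSB resource (sketch)
======
[source,XML]
-------
<clone id="apache-clone">
  <meta_attributes id="apache-clone-meta">
    <!-- explicit default: interchangeable copies, one active per node -->
    <nvpair id="apache-clone-unique" name="globally-unique" value="false"/>
  </meta_attributes>
  <!-- illustrative LSB resource -->
  <primitive id="apache" class="lsb" type="apache"/>
</clone>
-------
======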
=== Clone Contents === Clones must contain exactly one primitive or group resource. [WARNING] You should never reference the name of a clone's child. If you think you need to do this, you probably need to re-evaluate your design. === Clone Constraints === In most cases, a clone will have a single copy on each active cluster node. If this is not the case, you can indicate which nodes the cluster should preferentially assign copies to with resource location constraints. These constraints are written no differently from those for primitive resources except that the clone's +id+ is used. .Some constraints involving clones ====== [source,XML] ------- ------- ====== Ordering constraints behave slightly differently for clones. In the example above, +apache-stats+ will wait until all copies of +apache-clone+ that need to be started have done so before being started itself. Only if _no_ copies can be started will +apache-stats+ be prevented from being active. Additionally, the clone will wait for +apache-stats+ to be stopped before stopping itself. Colocation of a primitive or group resource with a clone means that the resource can run on any machine with an active copy of the clone. The cluster will choose a copy based on where the clone is running and the resource's own location preferences. Colocation between clones is also possible. If one clone +A+ is colocated with another clone +B+, the set of allowed locations for +A+ is limited to nodes on which +B+ is (or will be) active. Placement is then performed normally. [[s-clone-stickiness]] === Clone Stickiness === indexterm:[resource-stickiness,Clones] To achieve a stable allocation pattern, clones are slightly sticky by default. If no value for +resource-stickiness+ is provided, the clone will use a value of 1. Being a small value, it causes minimal disturbance to the score calculations of other resources but is enough to prevent Pacemaker from needlessly moving copies around the cluster. [NOTE] ==== For globally unique clones, this may result in multiple instances of the clone staying on a single node, even after another eligible node becomes active (for example, after being put into standby mode then made active again). If you do not want this behavior, specify a +resource-stickiness+ of 0 for the clone temporarily and let the cluster adjust, then set it back to 1 if you want the default behavior to apply again. ==== === Clone Resource Agent Requirements === Any resource can be used as an anonymous clone, as it requires no additional support from the resource agent. Whether it makes sense to do so depends on your resource and its resource agent. Globally unique clones do require some additional support in the resource agent. In particular, it must only respond with +$\{OCF_SUCCESS}+ if the node has that exact instance active. All other probes for instances of the clone should result in +$\{OCF_NOT_RUNNING}+ (or one of the other OCF error codes if they are failed). Individual instances of a clone are identified by appending a colon and a numerical offset, e.g. +apache:2+. Resource agents can find out how many copies there are by examining the +OCF_RESKEY_CRM_meta_clone_max+ environment variable and which copy it is by examining +OCF_RESKEY_CRM_meta_clone+. The resource agent must not make any assumptions (based on +OCF_RESKEY_CRM_meta_clone+) about which numerical instances are active. In particular, the list of active copies will not always be an unbroken sequence, nor always start at 0. 
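Returning to the constraints discussed earlier in this section, the sketch below shows a location, a colocation and an ordering constraint written against the clone's +id+. The resource names +apache-clone+ and +apache-stats+ follow the ordering discussion above; the node name, constraint ids and scores are illustrative.

.Constraints involving a clone (sketch)
======
[source,XML]
-------
<constraints>
  <!-- illustrative node name and score -->
  <rsc_location id="clone-prefers-node1" rsc="apache-clone" node="node1" score="500"/>
  <!-- apache-stats runs only where a copy of apache-clone is active -->
  <rsc_colocation id="stats-with-clone" rsc="apache-stats" with-rsc="apache-clone" score="INFINITY"/>
  <!-- apache-stats starts only after the clone's copies have started -->
  <rsc_order id="clone-before-stats" first="apache-clone" then="apache-stats"/>
</constraints>
-------
======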
==== Clone Notifications ==== Supporting notifications requires the +notify+ action to be implemented. If supported, the notify action will be passed a number of extra variables which, when combined with additional context, can be used to calculate the current state of the cluster and what is about to happen to it. .Environment variables supplied with Clone notify actions [width="95%",cols="5,3<",options="header",align="center"] |========================================================= |Variable |Description |OCF_RESKEY_CRM_meta_notify_type |Allowed values: +pre+, +post+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,type] indexterm:[type,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_operation |Allowed values: +start+, +stop+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,operation] indexterm:[operation,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_resource |Resources to be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_resource] indexterm:[start_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_resource |Resources to be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_resource] indexterm:[stop_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_resource |Resources that are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_resource] indexterm:[active_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_inactive_resource |Resources that are not running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,inactive_resource] indexterm:[inactive_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_uname |Nodes on which resources will be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_uname] indexterm:[start_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_uname |Nodes on which resources will be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_uname] indexterm:[stop_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_uname |Nodes on which resources are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_uname] indexterm:[active_uname,Notification Environment Variable] |========================================================= The variables come in pairs, such as +OCF_RESKEY_CRM_meta_notify_start_resource+ and +OCF_RESKEY_CRM_meta_notify_start_uname+ and should be treated as an array of whitespace-separated elements. +OCF_RESKEY_CRM_meta_notify_inactive_resource+ is an exception as the matching +uname+ variable does not exist since inactive resources are not running on any node. 
Thus in order to indicate that +clone:0+ will be started on +sles-1+, +clone:2+ will be started on +sles-3+, and +clone:3+ will be started on +sles-2+, the cluster would set .Notification variables ====== [source,Bash] ------- OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3" OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2" ------- ====== ==== Proper Interpretation of Notification Environment Variables ==== .Pre-notification (stop): * Active resources: +$OCF_RESKEY_CRM_meta_notify_active_resource+ * Inactive resources: +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (stop) / Pre-notification (start): * Active resources ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Inactive resources ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (start): * Active resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ [[s-resource-multistate]] == Multi-state - Resources That Have Multiple Modes == indexterm:[Multi-state Resources] indexterm:[Resource,Multi-state] Multi-state resources are a specialization of clone resources; please ensure you understand <> before continuing! Multi-state resources allow the instances to be in one of two operating modes (called 'roles'). The roles are called 'master' and 'slave', but can mean whatever you wish them to mean. The only limitation is that when an instance is started, it must come up in the slave role. 
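A minimal sketch of a multi-state resource follows. The +master+ element wraps a single primitive and, anticipating the monitoring discussion later in this section, its two +monitor+ operations use different intervals so that the master role is checked separately. The resource, provider and agent names are illustrative.

.A multi-state resource with role-aware monitoring (sketch)
======
[source,XML]
-------
<master id="myMasterRsc">
  <primitive id="myRsc" class="ocf" provider="myProvider" type="myApp">
    <operations>
      <!-- covers the slave role -->
      <op id="myRsc-monitor-slave" name="monitor" interval="60"/>
      <!-- different interval, covers the master role -->
      <op id="myRsc-monitor-master" name="monitor" interval="61" role="Master"/>
    </operations>
  </primitive>
</master>
-------
======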
=== Multi-state Properties === .Properties of a Multi-State Resource [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |Your name for the multi-state resource indexterm:[id,Multi-State Property] indexterm:[Multi-State,Property,id] |========================================================= === Multi-state Options === Options inherited from <> resources: +priority+, +target-role+, +is-managed+ Options inherited from <> resources: +clone-max+, +clone-node-max+, +notify+, +globally-unique+, +ordered+, +interleave+ .Multi-state-specific resource configuration options [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |master-max |1 |How many copies of the resource can be promoted to the +master+ role indexterm:[master-max,Multi-State Option] indexterm:[Multi-State,Option,master-max] |master-node-max |1 |How many copies of the resource can be promoted to the +master+ role on a single node indexterm:[master-node-max,Multi-State Option] indexterm:[Multi-State,Option,master-node-max] |========================================================= === Multi-state Instance Attributes === Multi-state resources have no instance attributes; however, any that are set here will be inherited by a master's children. === Multi-state Contents === Masters must contain exactly one primitive or group resource. [WARNING] You should never reference the name of a master's child. If you think you need to do this, you probably need to re-evaluate your design. === Monitoring Multi-State Resources === The usual monitor actions are insufficient to monitor a multi-state resource, because pacemaker needs to verify not only that the resource is active, but also that its actual role matches its intended one. Define two monitoring actions: the usual one will cover the slave role, and an additional one with +role="master"+ will cover the master role. .Monitoring both states of a multi-state resource ====== [source,XML] ------- ------- ====== [IMPORTANT] =========== It is crucial that _every_ monitor operation has a different interval! Pacemaker currently differentiates between operations only by resource and interval; so if (for example) a master/slave resource had the same monitor interval for both roles, Pacemaker would ignore the role when checking the status -- which would cause unexpected return codes, and therefore unnecessary complications. =========== === Multi-state Constraints === In most cases, multi-state resources will have a single copy on each active cluster node. If this is not the case, you can indicate which nodes the cluster should preferentially assign copies to with resource location constraints. These constraints are written no differently from those for primitive resources except that the master's +id+ is used. When considering multi-state resources in constraints, for most purposes it is sufficient to treat them as clones. The exception is that the +first-action+ and/or +then-action+ fields for ordering constraints may be set to +promote+ or +demote+ to constrain the master role, and colocation constraints may contain +rsc-role+ and/or +with-rsc-role+ fields. 
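The sketch below illustrates a +promote+ ordering and a role-aware colocation of the kind described here, using the +myApp+ and +database+ resources that the discussion after the following table refers to; the constraint ids and score are illustrative.

.Constraints involving a multi-state resource (sketch)
======
[source,XML]
-------
<constraints>
  <!-- myApp starts only after database has been promoted somewhere -->
  <rsc_order id="db-before-app" first="database" first-action="promote"
             then="myApp" then-action="start"/>
  <!-- myApp runs where a master instance of database is active -->
  <rsc_colocation id="app-with-db-master" rsc="myApp"
                  with-rsc="database" with-rsc-role="Master" score="INFINITY"/>
</constraints>
-------
======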
.Additional colocation constraint options for multi-state resources [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |rsc-role |Started |An additional attribute of colocation constraints that specifies the role that +rsc+ must be in. Allowed values: +Started+, +Master+, +Slave+. indexterm:[rsc-role,Ordering Constraints] indexterm:[Constraints,Ordering,rsc-role] |with-rsc-role |Started |An additional attribute of colocation constraints that specifies the role that +with-rsc+ must be in. Allowed values: +Started+, +Master+, +Slave+. indexterm:[with-rsc-role,Ordering Constraints] indexterm:[Constraints,Ordering,with-rsc-role] |========================================================= .Constraints involving multi-state resources ====== [source,XML] ------- ------- ====== In the example above, +myApp+ will wait until one of the database copies has been started and promoted to master before being started itself on the same node. Only if no copies can be promoted will +myApp+ be prevented from being active. Additionally, the cluster will wait for +myApp+ to be stopped before demoting the database. Colocation of a primitive or group resource with a multi-state resource means that it can run on any machine with an active copy of the multi-state resource that has the specified role (+master+ or +slave+). In the example above, the cluster will choose a location based on where database is running as a +master+, and if there are multiple +master+ instances it will also factor in +myApp+'s own location preferences when deciding which location to choose. Colocation with regular clones and other multi-state resources is also possible. In such cases, the set of allowed locations for the +rsc+ clone is (after role filtering) limited to nodes on which the +with-rsc+ multi-state resource is (or will be) in the specified role. Placement is then performed as normal. ==== Using Multi-state Resources in Colocation Sets ==== .Additional colocation set options relevant to multi-state resources [width="95%",cols="1m,1,6<",options="header",align="center"] |========================================================= |Field |Default |Description |role |Started |The role that 'all members' of the set must be in. Allowed values: +Started+, +Master+, +Slave+. indexterm:[role,Ordering Constraints] indexterm:[Constraints,Ordering,role] |========================================================= In the following example +B+'s master must be located on the same node as +A+'s master. Additionally resources +C+ and +D+ must be located on the same node as +A+'s and +B+'s masters. .Colocate C and D with A's and B's master instances ====== [source,XML] ------- ------- ====== ==== Using Multi-state Resources in Ordering Sets ==== .Additional ordered set options relevant to multi-state resources [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |action |value of +first-action+ |An additional attribute of ordering constraint sets that specifies the action that applies to 'all members' of the set. Allowed values: +start+, +stop+, +promote+, +demote+. 
indexterm:[action,Ordering Constraints] indexterm:[Constraints,Ordering,action] |========================================================= .Start C and D after first promoting A and B ====== [source,XML] ------- ------- ====== In the above example, +B+ cannot be promoted to a master role until +A+ has been promoted. Additionally, resources +C+ and +D+ must wait until +A+ and +B+ have been promoted before they can start. === Multi-state Stickiness === indexterm:[resource-stickiness,Multi-State] As with regular clones, multi-state resources are slightly sticky by default. See <> for details. === Which Resource Instance is Promoted === During the start operation, most resource agents should call the `crm_master` utility. This tool automatically detects both the resource and host and should be used to set a preference for being promoted. Based on this, +master-max+, and +master-node-max+, the instance(s) with the highest preference will be promoted. An alternative is to create a location constraint that indicates which nodes are most preferred as masters. .Explicitly preferring node1 to be promoted to master ====== [source,XML] ------- ------- ====== === Requirements for Multi-state Resource Agents === Since multi-state resources are an extension of cloned resources, all the requirements for resource agents that support clones are also requirements for resource agents that support multi-state resources. Additionally, multi-state resources require two extra actions, +demote+ and +promote+, which are responsible for changing the state of the resource. Like +start+ and +stop+, they should return +$\{OCF_SUCCESS}+ if they completed successfully or a relevant error code if they did not. The states can mean whatever you wish, but when the resource is started, it must come up in the mode called +slave+. From there the cluster will decide which instances to promote to +master+. In addition to the clone requirements for monitor actions, agents must also _accurately_ report which state they are in. The cluster relies on the agent to report its status (including role) accurately and does not indicate to the agent what role it currently believes it to be in. .Role implications of OCF return codes [width="95%",cols="1,1<",options="header",align="center"] |========================================================= |Monitor Return Code |Description |OCF_NOT_RUNNING |Stopped indexterm:[Return Code,OCF_NOT_RUNNING] |OCF_SUCCESS |Running (Slave) indexterm:[Return Code,OCF_SUCCESS] |OCF_RUNNING_MASTER |Running (Master) indexterm:[Return Code,OCF_RUNNING_MASTER] |OCF_FAILED_MASTER |Failed (Master) indexterm:[Return Code,OCF_FAILED_MASTER] |Other |Failed (Slave) |========================================================= ==== Multi-state Notifications ==== Like clones, supporting notifications requires the +notify+ action to be implemented. If supported, the notify action will be passed a number of extra variables which, when combined with additional context, can be used to calculate the current state of the cluster and what is about to happen to it. .Environment variables supplied with multi-state notify actions footnote:[Emphasized variables are specific to +Master+ resources, and all behave in the same manner as described for Clone resources.] 
[width="95%",cols="5,3<",options="header",align="center"] |========================================================= |Variable |Description |OCF_RESKEY_CRM_meta_notify_type |Allowed values: +pre+, +post+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,type] indexterm:[type,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_operation |Allowed values: +start+, +stop+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,operation] indexterm:[operation,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_resource |Resources that are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_resource] indexterm:[active_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_inactive_resource |Resources that are not running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,inactive_resource] indexterm:[inactive_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_master_resource_ |Resources that are running in +Master+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,master_resource] indexterm:[master_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_slave_resource_ |Resources that are running in +Slave+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,slave_resource] indexterm:[slave_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_resource |Resources to be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_resource] indexterm:[start_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_resource |Resources to be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_resource] indexterm:[stop_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_promote_resource_ |Resources to be promoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,promote_resource] indexterm:[promote_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_demote_resource_ |Resources to be demoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,demote_resource] indexterm:[demote_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_uname |Nodes on which resources will be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_uname] indexterm:[start_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_uname |Nodes on which resources will be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_uname] indexterm:[stop_uname,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_promote_uname_ |Nodes on which resources will be promoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,promote_uname] indexterm:[promote_uname,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_demote_uname_ |Nodes on which resources will be demoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,demote_uname] indexterm:[demote_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_uname |Nodes on which resources are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_uname] indexterm:[active_uname,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_master_uname_ |Nodes on which resources are running in +Master+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,master_uname] indexterm:[master_uname,Notification Environment Variable] 
|_OCF_RESKEY_CRM_meta_notify_slave_uname_ |Nodes on which resources are running in +Slave+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,slave_uname] indexterm:[slave_uname,Notification Environment Variable] |========================================================= ==== Proper Interpretation of Multi-state Notification Environment Variables ==== .Pre-notification (demote): * +Active+ resources: +$OCF_RESKEY_CRM_meta_notify_active_resource+ * +Master+ resources: +$OCF_RESKEY_CRM_meta_notify_master_resource+ * +Slave+ resources: +$OCF_RESKEY_CRM_meta_notify_slave_resource+ * Inactive resources: +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (demote) / Pre-notification (stop): * +Active+ resources: +$OCF_RESKEY_CRM_meta_notify_active_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * +Slave+ resources: +$OCF_RESKEY_CRM_meta_notify_slave_resource+ * Inactive resources: +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ .Post-notification (stop) / Pre-notification (start) * +Active+ resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * +Slave+ resources: ** +$OCF_RESKEY_CRM_meta_notify_slave_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (start) / Pre-notification (promote) * +Active+ resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * +Slave+ resources: ** +$OCF_RESKEY_CRM_meta_notify_slave_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: 
+$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (promote) * +Active+ resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * +Slave+ resources: ** +$OCF_RESKEY_CRM_meta_notify_slave_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ [[s-resource-bundle]] == Bundles - Isolated Environments == indexterm:[bundle] indexterm:[Resource,bundle] indexterm:[Docker,bundle] Pacemaker (version 1.1.17 and later) supports a special syntax for combining an isolated environment with the infrastructure support that it needs: the 'bundle'. The only isolation technology currently supported by Pacemaker bundles is https://www.docker.com/[Docker] containers. footnote:[Docker is a trademark of Docker, Inc. No endorsement by or association with Docker, Inc. is implied.] .A bundle for a containerized web server ==== [source,XML] ---- - + + ---- ==== === Bundle Properties === .Properties of a Bundle [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the bundle (required) indexterm:[id,bundle] indexterm:[bundle,Property,id] |description |Arbitrary text (not used by Pacemaker) indexterm:[description,bundle] indexterm:[bundle,Property,description] |========================================================= === Docker Properties === A bundle must contain exactly one ++ element. Before configuring a bundle in Pacemaker, the user must install Docker and supply a fully configured Docker image on every node allowed to run the bundle. Pacemaker will create an implicit +ocf:heartbeat:docker+ resource to manage a bundle's Docker container. 
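As a point of reference for the element-by-element descriptions that follow, here is a minimal sketch of a bundle combining the +docker+, +network+, +port-mapping+, +storage-mapping+ and +primitive+ elements; the image name, addresses, port and paths are illustrative.

.A bundle combining the elements described below (sketch)
====
[source,XML]
----
<bundle id="httpd-bundle">
  <!-- illustrative image tag -->
  <docker image="pcmk:httpd" replicas="3"/>
  <!-- illustrative address range and host interface -->
  <network ip-range-start="192.0.2.131" host-netmask="24" host-interface="eth0">
    <port-mapping id="httpd-port" port="80"/>
  </network>
  <storage>
    <!-- illustrative host path mapped into each replica -->
    <storage-mapping id="httpd-root" source-dir-root="/var/local/containers"
                     target-dir="/var/www/html" options="rw"/>
  </storage>
  <primitive id="httpd" class="ocf" provider="heartbeat" type="apache"/>
</bundle>
----
====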
.Properties of a Bundle's Docker Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |image | |Docker image tag (required) indexterm:[image,Docker] indexterm:[Docker,Property,image] |replicas |Value of +masters+ if that is positive, else 1 |A positive integer specifying the number of container instances to launch indexterm:[replicas,Docker] indexterm:[Docker,Property,replicas] |replicas-per-host |1 |A positive integer specifying the number of container instances allowed to run on a single node indexterm:[replicas-per-host,Docker] indexterm:[Docker,Property,replicas-per-host] |masters |0 |A non-negative integer that, if positive, indicates that the containerized - service should be treated as a master/slave service, with this many master - instances allowed + service should be treated as a multistate service, with this many replicas + allowed to run the service in the master role indexterm:[masters,Docker] indexterm:[Docker,Property,masters] |network | |If specified, this will be passed to +docker run+ as the https://docs.docker.com/engine/reference/run/#network-settings[network setting] for the Docker container. indexterm:[network,Docker] indexterm:[Docker,Property,network] |options | |Extra command-line options to pass to `docker run` indexterm:[options,Docker] indexterm:[Docker,Property,options] |========================================================= === Bundle Network Properties === A bundle may optionally contain one ++ element. indexterm:[bundle,network] .Properties of a Bundle's Network Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |ip-range-start | |If specified, Pacemaker will create an implicit +ocf:heartbeat:IPaddr2+ resource for each container instance, starting with this IP address, using up to +replicas+ sequential addresses. These addresses can be used from the host's network to reach the service inside the container, though it is not visible within the container itself. Only IPv4 addresses are currently supported. indexterm:[ip-range-start,network] indexterm:[network,Property,ip-range-start] |host-netmask |32 |If +ip-range-start+ is specified, the IP addresses are created with this CIDR netmask (as a number of bits). indexterm:[host-netmask,network] indexterm:[network,Property,host-netmask] |host-interface | |If +ip-range-start+ is specified, the IP addresses are created on this host interface (by default, it will be determined from the IP address). indexterm:[host-interface,network] indexterm:[network,Property,host-interface] |control-port | |If the bundle contains a +primitive+, the cluster will use this integer TCP port for communication with Pacemaker Remote inside the container. This takes precedence over the value of any PCMK_remote_port environment variable set in the container image. This can allow a +primitive+ to be specified without using +ip-start-range+, in which case +replicas-per-host+ must be 1. indexterm:[control-port,network] indexterm:[network,Property,control-port] |========================================================= +[NOTE] +==== +If +ip-range-start+ is used, Pacemaker will automatically ensure that ++/etc/hosts+ inside the containers has entries for each replica and its +assigned IP. Replicas are named by the bundle id plus a dash and an integer +counter starting with zero. 
For example, if a bundle named +httpd-bundle+ has ++replicas=2+, its containers will be named +httpd-bundle-0+ and ++httpd-bundle-1+. +==== + Additionally, a ++ element may optionally contain one or more ++ elements. indexterm:[bundle,network,port-mapping] .Properties of a Bundle's Port-Mapping Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the port mapping (required) indexterm:[id,port-mapping] indexterm:[port-mapping,Property,id] |port | |If this is specified, connections to this TCP port number on the host network (on the container's assigned IP address, if +ip-range-start+ is specified) will be forwarded to the container network. Exactly one of +port+ or +range+ must be specified in a +port-mapping+. indexterm:[port,port-mapping] indexterm:[port-mapping,Property,port] |internal-port |value of +port+ |If +port+ and this are specified, connections to +port+ on the host's network will be forwarded to this port on the container network. indexterm:[internal-port,port-mapping] indexterm:[port-mapping,Property,internal-port] |range | |If this is specified, connections to these TCP port numbers (expressed as 'first_port'-'last_port') on the host network (on the container's assigned IP address, if +ip-range-start+ is specified) will be forwarded to the same ports in the container network. Exactly one of +port+ or +range+ must be specified in a +port-mapping+. indexterm:[range,port-mapping] indexterm:[port-mapping,Property,range] |========================================================= [NOTE] ==== If the bundle contains a +primitive+, Pacemaker will automatically map the +control-port+, so it is not necessary to specify that port in a +port-mapping+. ==== === Bundle Storage Properties === A bundle may optionally contain one ++ element. A ++ element has no properties of its own, but may contain one or more ++ elements. indexterm:[bundle,storage,storage-mapping] .Properties of a Bundle's Storage-Mapping Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the storage mapping (required) indexterm:[id,storage-mapping] indexterm:[storage-mapping,Property,id] |source-dir | |The absolute path on the host's filesystem that will be mapped into the container. Exactly one of +source-dir+ and +source-dir-root+ must be specified in a +storage-mapping+. indexterm:[source-dir,storage-mapping] indexterm:[storage-mapping,Property,source-dir] |source-dir-root | |The start of a path on the host's filesystem that will be mapped into the container, using a different subdirectory on the host for each container instance. Exactly one of +source-dir+ and +source-dir-root+ must be specified in a +storage-mapping+. 
indexterm:[source-dir-root,storage-mapping] indexterm:[storage-mapping,Property,source-dir-root] |target-dir | |The path name within the container where the host storage will be mapped (required) indexterm:[target-dir,storage-mapping] indexterm:[storage-mapping,Property,target-dir] |options | |File system mount options to use when mapping the storage indexterm:[options,storage-mapping] indexterm:[storage-mapping,Property,options] |========================================================= [NOTE] ==== If the bundle contains a +primitive+, Pacemaker will automatically map the equivalent of +source-dir=/etc/pacemaker/authkey target-dir=/etc/pacemaker/authkey+ -and +source-dir-root=/var/log/container target-dir=/var/log+ into the +and +source-dir-root=/var/log/pacemaker/bundles target-dir=/var/log+ into the container, so it is not necessary to specify those paths in a -+storage-mapping+. ++storage-mapping+. Newer versions of +ocf:heartbeat:docker+ will automatically +create the source directories if they do not exist, but the user may want to +ensure they exist beforehand. ==== === Bundle Primitive === A bundle may optionally contain one ++ resource (see <>). The primitive may have operations, instance attributes and meta-attributes defined, as usual. If a bundle contains a primitive resource, the container image must include the Pacemaker Remote daemon, and either +ip-range-start+ or +control-port+ must be configured in the bundle. Pacemaker will create an implicit +ocf:pacemaker:remote+ resource for the connection, launch Pacemaker Remote within the container, and monitor and manage the primitive resource via Pacemaker Remote. If the bundle has more than one container instance (replica), the primitive resource will function as an implicit clone (see <>) -- a multistate clone if the bundle has +masters+ greater than zero (see <>). [IMPORTANT] ==== If Pacemaker Remote nodes are used in a cluster with bundle resources that contain a +primitive+, it is strongly recommended to configure location constraints prohibiting the bundles from running on those nodes. Running a bundle with a +primitive+ on a Pacemaker Remote node might work if +ip-range-start+ is used, but that configuration has not yet been tested. ==== === Limitations of Bundles === Currently, bundles may not be cloned, or included in groups or colocation constraints. This includes the bundle's primitive and any resources implicitly created by Pacemaker for the bundle. Bundles do not have instance attributes, meta-attributes, utilization attributes, or operations of their own, though a primitive included in a bundle may. diff --git a/extra/logrotate/pacemaker.in b/extra/logrotate/pacemaker.in index d382433525..2fae2bf682 100644 --- a/extra/logrotate/pacemaker.in +++ b/extra/logrotate/pacemaker.in @@ -1,10 +1,10 @@ -@localstatedir@/log/pacemaker.log { +@localstatedir@/log/pacemaker.log @CRM_LOG_DIR@/bundles/*/pacemaker.log { compress dateext weekly rotate 99 maxage 365 notifempty missingok copytruncate } diff --git a/include/crm_config.h.in b/include/crm_config.h.in index 30cec808f7..7eb01d424a 100755 --- a/include/crm_config.h.in +++ b/include/crm_config.h.in @@ -1,102 +1,105 @@ /* crm_config.h.in */ /* * Copyright (C) 2006 - 2007 * Andrew Beekhof * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_CONFIG__H__IN #define CRM_CONFIG__H__IN /****** Versions ******/ /* Current pacemaker version */ #undef PACEMAKER_VERSION /* Build version */ #undef BUILD_VERSION /* Current version of the Pacemaker Relax-NG Schema */ #undef CRM_DTD_VERSION /****** Other ******/ /* Group to run Pacemaker daemons as */ #undef CRM_DAEMON_GROUP /* User to run Pacemaker daemons as */ #undef CRM_DAEMON_USER /****** Directories ******/ +/* Where Pacemaker can store log files */ +#undef CRM_LOG_DIR + /* Location for Pacemaker daemons */ #undef CRM_DAEMON_DIR /* Where to keep blackbox dumps */ #undef CRM_BLACKBOX_DIR /* Where to keep configuration files */ #undef CRM_CONFIG_DIR /* Where to keep PEngine outputs */ #undef PE_STATE_DIR /* Location to store core files produced by Pacemaker daemons */ #undef CRM_CORE_DIR /* Where to keep state files and sockets */ #undef CRM_STATE_DIR /* Location for the Pacemaker Relax-NG Schema */ #undef CRM_DTD_DIRECTORY /****** Features ******/ /* Set of enabled features */ #undef CRM_FEATURES /* Support the Heartbeat messaging and membership layer */ #undef SUPPORT_HEARTBEAT /* Support the Corosync messaging and membership layer */ #undef SUPPORT_COROSYNC /* Support the Pacemaker plugin for Corosync */ #undef SUPPORT_PLUGIN /* Support the consumption of membership and quorum from corosync */ #undef SUPPORT_CS_QUORUM /* Support systemd based system services */ #undef SUPPORT_SYSTEMD /* Support upstart based system services */ #undef SUPPORT_UPSTART /****** Compatibility ******/ /* Does corosync use libqb for its ipc */ #undef CS_USES_LIBQB /* Compatibility alias for SUPPORT_COROSYNC */ #undef SUPPORT_AIS /* Compatibility alias for SUPPORT_COROSYNC */ #undef AIS_COROSYNC #endif /* CRM_CONFIG__H__IN */ diff --git a/lib/common/logging.c b/lib/common/logging.c index b6081438eb..b9a1c805b1 100644 --- a/lib/common/logging.c +++ b/lib/common/logging.c @@ -1,1262 +1,1266 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include unsigned int crm_log_priority = LOG_NOTICE; unsigned int crm_log_level = LOG_INFO; static gboolean crm_tracing_enabled(void); unsigned int crm_trace_nonlog = 0; bool crm_is_daemon = 0; #ifdef HAVE_G_LOG_SET_DEFAULT_HANDLER GLogFunc glib_log_default; static void crm_glib_handler(const gchar * log_domain, GLogLevelFlags flags, const gchar * message, gpointer user_data) { int log_level = LOG_WARNING; GLogLevelFlags msg_level = (flags & G_LOG_LEVEL_MASK); static struct qb_log_callsite *glib_cs = NULL; if (glib_cs == NULL) { glib_cs = qb_log_callsite_get(__FUNCTION__, __FILE__, "glib-handler", LOG_DEBUG, __LINE__, crm_trace_nonlog); } switch (msg_level) { case G_LOG_LEVEL_CRITICAL: log_level = LOG_CRIT; if (crm_is_callsite_active(glib_cs, LOG_DEBUG, 0) == FALSE) { /* log and record how we got here */ crm_abort(__FILE__, __FUNCTION__, __LINE__, message, TRUE, TRUE); } break; case G_LOG_LEVEL_ERROR: log_level = LOG_ERR; break; case G_LOG_LEVEL_MESSAGE: log_level = LOG_NOTICE; break; case G_LOG_LEVEL_INFO: log_level = LOG_INFO; break; case G_LOG_LEVEL_DEBUG: log_level = LOG_DEBUG; break; case G_LOG_LEVEL_WARNING: case G_LOG_FLAG_RECURSION: case G_LOG_FLAG_FATAL: case G_LOG_LEVEL_MASK: log_level = LOG_WARNING; break; } do_crm_log(log_level, "%s: %s", log_domain, message); } #endif #ifndef NAME_MAX # define NAME_MAX 256 #endif static void crm_trigger_blackbox(int nsig) { if(nsig == SIGTRAP) { /* Turn it on if it wasn't already */ crm_enable_blackbox(nsig); } crm_write_blackbox(nsig, NULL); } const char * daemon_option(const char *option) { char env_name[NAME_MAX]; const char *value = NULL; snprintf(env_name, NAME_MAX, "PCMK_%s", option); value = getenv(env_name); if (value != NULL) { crm_trace("Found %s = %s", env_name, value); return value; } snprintf(env_name, NAME_MAX, "HA_%s", option); value = getenv(env_name); if (value != NULL) { crm_trace("Found %s = %s", env_name, value); return value; } crm_trace("Nothing found for %s", option); return NULL; } void set_daemon_option(const char *option, const char *value) { char env_name[NAME_MAX]; snprintf(env_name, NAME_MAX, "PCMK_%s", option); if (value) { crm_trace("Setting %s to %s", env_name, value); setenv(env_name, value, 1); } else { crm_trace("Unsetting %s", env_name); unsetenv(env_name); } snprintf(env_name, NAME_MAX, "HA_%s", option); if (value) { crm_trace("Setting %s to %s", env_name, value); setenv(env_name, value, 1); } else { crm_trace("Unsetting %s", env_name); unsetenv(env_name); } } gboolean daemon_option_enabled(const char *daemon, const char *option) { const char *value = daemon_option(option); if (value != NULL && crm_is_true(value)) { return TRUE; } else if (value != NULL && strstr(value, daemon)) { return TRUE; } return FALSE; } void crm_log_deinit(void) { #ifdef HAVE_G_LOG_SET_DEFAULT_HANDLER g_log_set_default_handler(glib_log_default, NULL); #endif } #define FMT_MAX 256 static void set_format_string(int method, const char *daemon) { int offset = 0; char fmt[FMT_MAX]; if (method > QB_LOG_STDERR) { /* When logging to a file */ struct utsname res; if (uname(&res) == 0) { offset += snprintf(fmt + offset, FMT_MAX - offset, "%%t [%d] %s 
%10s: ", getpid(), res.nodename, daemon); } else { offset += snprintf(fmt + offset, FMT_MAX - offset, "%%t [%d] %10s: ", getpid(), daemon); } } if (method == QB_LOG_SYSLOG) { offset += snprintf(fmt + offset, FMT_MAX - offset, "%%g %%-7p: %%b"); crm_extended_logging(method, QB_FALSE); } else if (crm_tracing_enabled()) { offset += snprintf(fmt + offset, FMT_MAX - offset, "(%%-12f:%%5l %%g) %%-7p: %%n:\t%%b"); } else { offset += snprintf(fmt + offset, FMT_MAX - offset, "%%g %%-7p: %%n:\t%%b"); } CRM_LOG_ASSERT(offset > 0); qb_log_format_set(method, fmt); } gboolean crm_add_logfile(const char *filename) { bool is_default = false; static int default_fd = -1; static gboolean have_logfile = FALSE; + + /* @COMPAT This should be CRM_LOG_DIR "/pacemaker.log". We aren't changing + * it yet because it will be a significant user-visible change to publicize. + */ const char *default_logfile = "/var/log/pacemaker.log"; struct stat parent; int fd = 0, rc = 0; FILE *logfile = NULL; char *parent_dir = NULL; char *filename_cp; if (filename == NULL && have_logfile == FALSE) { filename = default_logfile; } if (filename == NULL) { return FALSE; /* Nothing to do */ } else if(safe_str_eq(filename, "none")) { return FALSE; /* Nothing to do */ } else if(safe_str_eq(filename, "/dev/null")) { return FALSE; /* Nothing to do */ } else if(safe_str_eq(filename, default_logfile)) { is_default = TRUE; } if(is_default && default_fd >= 0) { return TRUE; /* Nothing to do */ } /* Check the parent directory */ filename_cp = strdup(filename); parent_dir = dirname(filename_cp); rc = stat(parent_dir, &parent); if (rc != 0) { crm_err("Directory '%s' does not exist: logging to '%s' is disabled", parent_dir, filename); free(filename_cp); return FALSE; } free(filename_cp); errno = 0; logfile = fopen(filename, "a"); if(logfile == NULL) { crm_err("%s (%d): Logging to '%s' as uid=%u, gid=%u is disabled", pcmk_strerror(errno), errno, filename, geteuid(), getegid()); return FALSE; } /* Check/Set permissions if we're root */ if (geteuid() == 0) { struct stat st; uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; gboolean fix = FALSE; int logfd = fileno(logfile); rc = fstat(logfd, &st); if (rc < 0) { crm_perror(LOG_WARNING, "Cannot stat %s", filename); fclose(logfile); return FALSE; } if(crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) == 0) { if (st.st_gid != pcmk_gid) { /* Wrong group */ fix = TRUE; } else if ((st.st_mode & S_IRWXG) != (S_IRGRP | S_IWGRP)) { /* Not read/writable by the correct group */ fix = TRUE; } } if (fix) { rc = fchown(logfd, pcmk_uid, pcmk_gid); if (rc < 0) { crm_warn("Cannot change the ownership of %s to user %s and gid %d", filename, CRM_DAEMON_USER, pcmk_gid); } rc = fchmod(logfd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); if (rc < 0) { crm_warn("Cannot change the mode of %s to rw-rw----", filename); } fprintf(logfile, "Set r/w permissions for uid=%d, gid=%d on %s\n", pcmk_uid, pcmk_gid, filename); if (fflush(logfile) < 0 || fsync(logfd) < 0) { crm_err("Couldn't write out logfile: %s", filename); } } } /* Close and reopen with libqb */ fclose(logfile); fd = qb_log_file_open(filename); if (fd < 0) { crm_perror(LOG_WARNING, "Couldn't send additional logging to %s", filename); return FALSE; } if(is_default) { default_fd = fd; } else if(default_fd >= 0) { crm_notice("Switching to %s", filename); qb_log_ctl(default_fd, QB_LOG_CONF_ENABLED, QB_FALSE); } crm_notice("Additional logging available in %s", filename); qb_log_ctl(fd, QB_LOG_CONF_ENABLED, QB_TRUE); /* qb_log_ctl(fd, QB_LOG_CONF_FILE_SYNC, 1); Turn on synchronous writes 
*/ /* Enable callsites */ crm_update_callsites(); have_logfile = TRUE; return TRUE; } static int blackbox_trigger = 0; static char *blackbox_file_prefix = NULL; static void blackbox_logger(int32_t t, struct qb_log_callsite *cs, time_t timestamp, const char *msg) { if(cs && cs->priority < LOG_ERR) { crm_write_blackbox(SIGTRAP, cs); /* Bypass the over-dumping logic */ } else { crm_write_blackbox(0, cs); } } static void crm_control_blackbox(int nsig, bool enable) { int lpc = 0; if (blackbox_file_prefix == NULL) { pid_t pid = getpid(); blackbox_file_prefix = malloc(NAME_MAX); snprintf(blackbox_file_prefix, NAME_MAX, "%s/%s-%d", CRM_BLACKBOX_DIR, crm_system_name, pid); } if (enable && qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_SIZE, 5 * 1024 * 1024); /* Any size change drops existing entries */ qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_TRUE); /* Setting the size seems to disable it */ /* Enable synchronous logging */ for (lpc = QB_LOG_BLACKBOX; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_FILE_SYNC, QB_TRUE); } crm_notice("Initiated blackbox recorder: %s", blackbox_file_prefix); /* Save to disk on abnormal termination */ crm_signal(SIGSEGV, crm_trigger_blackbox); crm_signal(SIGABRT, crm_trigger_blackbox); crm_signal(SIGILL, crm_trigger_blackbox); crm_signal(SIGBUS, crm_trigger_blackbox); crm_update_callsites(); blackbox_trigger = qb_log_custom_open(blackbox_logger, NULL, NULL, NULL); qb_log_ctl(blackbox_trigger, QB_LOG_CONF_ENABLED, QB_TRUE); crm_trace("Trigger: %d is %d %d", blackbox_trigger, qb_log_ctl(blackbox_trigger, QB_LOG_CONF_STATE_GET, 0), QB_LOG_STATE_ENABLED); crm_update_callsites(); } else if (!enable && qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_STATE_GET, 0) == QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); /* Disable synchronous logging again when the blackbox is disabled */ for (lpc = QB_LOG_BLACKBOX; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_FILE_SYNC, QB_FALSE); } } } void crm_enable_blackbox(int nsig) { crm_control_blackbox(nsig, TRUE); } void crm_disable_blackbox(int nsig) { crm_control_blackbox(nsig, FALSE); } void crm_write_blackbox(int nsig, struct qb_log_callsite *cs) { static int counter = 1; static time_t last = 0; char buffer[NAME_MAX]; time_t now = time(NULL); if (blackbox_file_prefix == NULL) { return; } switch (nsig) { case 0: case SIGTRAP: /* The graceful case - such as assertion failure or user request */ if (nsig == 0 && now == last) { /* Prevent over-dumping */ return; } snprintf(buffer, NAME_MAX, "%s.%d", blackbox_file_prefix, counter++); if (nsig == SIGTRAP) { crm_notice("Blackbox dump requested, please see %s for contents", buffer); } else if (cs) { syslog(LOG_NOTICE, "Problem detected at %s:%d (%s), please see %s for additional details", cs->function, cs->lineno, cs->filename, buffer); } else { crm_notice("Problem detected, please see %s for additional details", buffer); } last = now; qb_log_blackbox_write_to_file(buffer); /* Flush the existing contents * A size change would also work */ qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_TRUE); break; default: /* Do as little as possible, just try to get what we have out * We logged the filename when the blackbox was enabled */ crm_signal(nsig, SIG_DFL); qb_log_blackbox_write_to_file(blackbox_file_prefix); qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); raise(nsig); break; } } 
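/* Illustrative only (not part of this change): a minimal sketch of how a
 * caller might drive the logging and blackbox functions defined above.  The
 * header path <crm/common/logging.h> and the option values chosen here are
 * assumptions made for the example; crm_log_init(), crm_enable_blackbox()
 * and crm_write_blackbox() are the functions implemented in this file.
 */
#if 0
#include <signal.h>
#include <syslog.h>
#include <crm/common/logging.h>

static int
example_setup(int argc, char **argv)
{
    /* Daemon-style init: log via syslog/file, no stderr, not quiet */
    crm_log_init("example", LOG_INFO, TRUE, FALSE, argc, argv, FALSE);

    crm_enable_blackbox(0);             /* start the in-memory recorder */
    crm_write_blackbox(SIGTRAP, NULL);  /* request a dump, as SIGTRAP would */
    return 0;
}
#endif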
gboolean crm_log_cli_init(const char *entity) { return crm_log_init(entity, LOG_ERR, FALSE, FALSE, 0, NULL, TRUE); } static const char * crm_quark_to_string(uint32_t tag) { const char *text = g_quark_to_string(tag); if (text) { return text; } return ""; } static void crm_log_filter_source(int source, const char *trace_files, const char *trace_fns, const char *trace_fmts, const char *trace_tags, const char *trace_blackbox, struct qb_log_callsite *cs) { if (qb_log_ctl(source, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { return; } else if (cs->tags != crm_trace_nonlog && source == QB_LOG_BLACKBOX) { /* Blackbox gets everything if enabled */ qb_bit_set(cs->targets, source); } else if (source == blackbox_trigger && blackbox_trigger > 0) { /* Should this log message result in the blackbox being dumped */ if (cs->priority <= LOG_ERR) { qb_bit_set(cs->targets, source); } else if (trace_blackbox) { char *key = crm_strdup_printf("%s:%d", cs->function, cs->lineno); if (strstr(trace_blackbox, key) != NULL) { qb_bit_set(cs->targets, source); } free(key); } } else if (source == QB_LOG_SYSLOG) { /* No tracing to syslog */ if (cs->priority <= crm_log_priority && cs->priority <= crm_log_level) { qb_bit_set(cs->targets, source); } /* Log file tracing options... */ } else if (cs->priority <= crm_log_level) { qb_bit_set(cs->targets, source); } else if (trace_files && strstr(trace_files, cs->filename) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_fns && strstr(trace_fns, cs->function) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_fmts && strstr(trace_fmts, cs->format) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_tags && cs->tags != 0 && cs->tags != crm_trace_nonlog && g_quark_to_string(cs->tags) != NULL) { qb_bit_set(cs->targets, source); } } static void crm_log_filter(struct qb_log_callsite *cs) { int lpc = 0; static int need_init = 1; static const char *trace_fns = NULL; static const char *trace_tags = NULL; static const char *trace_fmts = NULL; static const char *trace_files = NULL; static const char *trace_blackbox = NULL; if (need_init) { need_init = 0; trace_fns = getenv("PCMK_trace_functions"); trace_fmts = getenv("PCMK_trace_formats"); trace_tags = getenv("PCMK_trace_tags"); trace_files = getenv("PCMK_trace_files"); trace_blackbox = getenv("PCMK_trace_blackbox"); if (trace_tags != NULL) { uint32_t tag; char token[500]; const char *offset = NULL; const char *next = trace_tags; do { offset = next; next = strchrnul(offset, ','); snprintf(token, 499, "%.*s", (int)(next - offset), offset); tag = g_quark_from_string(token); crm_info("Created GQuark %u from token '%s' in '%s'", tag, token, trace_tags); if (next[0] != 0) { next++; } } while (next != NULL && next[0] != 0); } } cs->targets = 0; /* Reset then find targets to enable */ for (lpc = QB_LOG_SYSLOG; lpc < QB_LOG_TARGET_MAX; lpc++) { crm_log_filter_source(lpc, trace_files, trace_fns, trace_fmts, trace_tags, trace_blackbox, cs); } } gboolean crm_is_callsite_active(struct qb_log_callsite *cs, uint8_t level, uint32_t tags) { gboolean refilter = FALSE; if (cs == NULL) { return FALSE; } if (cs->priority != level) { cs->priority = level; refilter = TRUE; } if (cs->tags != tags) { cs->tags = tags; refilter = TRUE; } if (refilter) { crm_log_filter(cs); } if (cs->targets == 0) { return FALSE; } return TRUE; } void crm_update_callsites(void) { static gboolean log = TRUE; if (log) { log = FALSE; crm_debug ("Enabling callsites based on priority=%d, files=%s, functions=%s, formats=%s, tags=%s", crm_log_level, 
getenv("PCMK_trace_files"), getenv("PCMK_trace_functions"), getenv("PCMK_trace_formats"), getenv("PCMK_trace_tags")); } qb_log_filter_fn_set(crm_log_filter); } static gboolean crm_tracing_enabled(void) { if (crm_log_level >= LOG_TRACE) { return TRUE; } else if (getenv("PCMK_trace_files") || getenv("PCMK_trace_functions") || getenv("PCMK_trace_formats") || getenv("PCMK_trace_tags")) { return TRUE; } return FALSE; } static int crm_priority2int(const char *name) { struct syslog_names { const char *name; int priority; }; static struct syslog_names p_names[] = { {"emerg", LOG_EMERG}, {"alert", LOG_ALERT}, {"crit", LOG_CRIT}, {"error", LOG_ERR}, {"warning", LOG_WARNING}, {"notice", LOG_NOTICE}, {"info", LOG_INFO}, {"debug", LOG_DEBUG}, {NULL, -1} }; int lpc; for (lpc = 0; name != NULL && p_names[lpc].name != NULL; lpc++) { if (crm_str_eq(p_names[lpc].name, name, TRUE)) { return p_names[lpc].priority; } } return crm_log_priority; } static void crm_identity(const char *entity, int argc, char **argv) { if(crm_system_name != NULL) { /* Nothing to do */ } else if (entity) { free(crm_system_name); crm_system_name = strdup(entity); } else if (argc > 0 && argv != NULL) { char *mutable = strdup(argv[0]); char *modified = basename(mutable); if (strstr(modified, "lt-") == modified) { modified += 3; } free(crm_system_name); crm_system_name = strdup(modified); free(mutable); } else if (crm_system_name == NULL) { crm_system_name = strdup("Unknown"); } setenv("PCMK_service", crm_system_name, 1); } void crm_log_preinit(const char *entity, int argc, char **argv) { /* Configure libqb logging with nothing turned on */ int lpc = 0; int32_t qb_facility = 0; static bool have_logging = FALSE; if(have_logging == FALSE) { have_logging = TRUE; crm_xml_init(); /* Sets buffer allocation strategy */ if (crm_trace_nonlog == 0) { crm_trace_nonlog = g_quark_from_static_string("Pacemaker non-logging tracepoint"); } umask(S_IWGRP | S_IWOTH | S_IROTH); /* Redirect messages from glib functions to our handler */ #ifdef HAVE_G_LOG_SET_DEFAULT_HANDLER glib_log_default = g_log_set_default_handler(crm_glib_handler, NULL); #endif /* and for good measure... - this enum is a bit field (!) 
*/ g_log_set_always_fatal((GLogLevelFlags) 0); /*value out of range */ /* Who do we log as */ crm_identity(entity, argc, argv); qb_facility = qb_log_facility2int("local0"); qb_log_init(crm_system_name, qb_facility, LOG_ERR); crm_log_level = LOG_CRIT; /* Nuke any syslog activity until it's asked for */ qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_FALSE); /* Set format strings and disable threading * Pacemaker and threads do not mix well (due to the amount of forking) */ qb_log_tags_stringify_fn_set(crm_quark_to_string); for (lpc = QB_LOG_SYSLOG; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_THREADED, QB_FALSE); set_format_string(lpc, crm_system_name); } } } gboolean crm_log_init(const char *entity, uint8_t level, gboolean daemon, gboolean to_stderr, int argc, char **argv, gboolean quiet) { const char *syslog_priority = NULL; const char *logfile = daemon_option("logfile"); const char *facility = daemon_option("logfacility"); const char *f_copy = facility; crm_is_daemon = daemon; crm_log_preinit(entity, argc, argv); if(level > crm_log_level) { crm_log_level = level; } /* Should we log to syslog */ if (facility == NULL) { if(crm_is_daemon) { facility = "daemon"; } else { facility = "none"; } set_daemon_option("logfacility", facility); } if (safe_str_eq(facility, "none")) { quiet = TRUE; } else { qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_FACILITY, qb_log_facility2int(facility)); } if (daemon_option_enabled(crm_system_name, "debug")) { /* Override the default setting */ crm_log_level = LOG_DEBUG; } /* What lower threshold do we have for sending to syslog */ syslog_priority = daemon_option("logpriority"); if(syslog_priority) { int priority = crm_priority2int(syslog_priority); crm_log_priority = priority; qb_log_filter_ctl(QB_LOG_SYSLOG, QB_LOG_FILTER_ADD, QB_LOG_FILTER_FILE, "*", priority); } else { qb_log_filter_ctl(QB_LOG_SYSLOG, QB_LOG_FILTER_ADD, QB_LOG_FILTER_FILE, "*", LOG_NOTICE); } if (!quiet) { /* Nuke any syslog activity */ qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_TRUE); } /* Should we log to stderr */ if (daemon_option_enabled(crm_system_name, "stderr")) { /* Override the default setting */ to_stderr = TRUE; } crm_enable_stderr(to_stderr); /* Should we log to a file */ if (safe_str_eq("none", logfile)) { /* No soup^Hlogs for you! */ } else if(crm_is_daemon) { /* The daemons always get a log file, unless explicitly set to configured 'none' */ crm_add_logfile(logfile); } else if(logfile) { crm_add_logfile(logfile); } if (crm_is_daemon && daemon_option_enabled(crm_system_name, "blackbox")) { crm_enable_blackbox(0); } /* Summary */ crm_trace("Quiet: %d, facility %s", quiet, f_copy); daemon_option("logfile"); daemon_option("logfacility"); crm_update_callsites(); /* Ok, now we can start logging... 
*/ if (quiet == FALSE && crm_is_daemon == FALSE) { crm_log_args(argc, argv); } if (crm_is_daemon) { const char *user = getenv("USER"); if (user != NULL && safe_str_neq(user, "root") && safe_str_neq(user, CRM_DAEMON_USER)) { crm_trace("Not switching to corefile directory for %s", user); crm_is_daemon = FALSE; } } if (crm_is_daemon) { int user = getuid(); const char *base = CRM_CORE_DIR; struct passwd *pwent = getpwuid(user); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get name for uid: %d", user); } else if (safe_str_neq(pwent->pw_name, "root") && safe_str_neq(pwent->pw_name, CRM_DAEMON_USER)) { crm_trace("Don't change active directory for regular user: %s", pwent->pw_name); } else if (chdir(base) < 0) { crm_perror(LOG_INFO, "Cannot change active directory to %s", base); } else { crm_info("Changed active directory to %s", base); #if 0 { char path[512]; snprintf(path, 512, "%s-%d", crm_system_name, getpid()); mkdir(path, 0750); chdir(path); crm_info("Changed active directory to %s/%s/%s", base, pwent->pw_name, path); } #endif } /* Original meanings from signal(7) * * Signal Value Action Comment * SIGTRAP 5 Core Trace/breakpoint trap * SIGUSR1 30,10,16 Term User-defined signal 1 * SIGUSR2 31,12,17 Term User-defined signal 2 * * Our usage is as similar as possible */ mainloop_add_signal(SIGUSR1, crm_enable_blackbox); mainloop_add_signal(SIGUSR2, crm_disable_blackbox); mainloop_add_signal(SIGTRAP, crm_trigger_blackbox); } return TRUE; } /* returns the old value */ unsigned int set_crm_log_level(unsigned int level) { unsigned int old = crm_log_level; crm_log_level = level; crm_update_callsites(); crm_trace("New log level: %d", level); return old; } void crm_enable_stderr(int enable) { if (enable && qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_TRUE); crm_update_callsites(); } else if (enable == FALSE) { qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_FALSE); } } void crm_bump_log_level(int argc, char **argv) { static int args = TRUE; int level = crm_log_level; if (args && argc > 1) { crm_log_args(argc, argv); } if (qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_STATE_GET, 0) == QB_LOG_STATE_ENABLED) { set_crm_log_level(level + 1); } /* Enable after potentially logging the argstring, not before */ crm_enable_stderr(TRUE); } unsigned int get_crm_log_level(void) { return crm_log_level; } #define ARGS_FMT "Invoked: %s" void crm_log_args(int argc, char **argv) { int lpc = 0; int len = 0; int existing_len = 0; int line = __LINE__; static int logged = 0; char *arg_string = NULL; if (argc == 0 || argv == NULL || logged) { return; } logged = 1; for (; lpc < argc; lpc++) { if (argv[lpc] == NULL) { break; } len = 2 + strlen(argv[lpc]); /* +1 space, +1 EOS */ arg_string = realloc_safe(arg_string, len + existing_len); existing_len += sprintf(arg_string + existing_len, "%s ", argv[lpc]); } qb_log_from_external_source(__func__, __FILE__, ARGS_FMT, LOG_NOTICE, line, 0, arg_string); free(arg_string); } const char * pcmk_errorname(int rc) { int error = ABS(rc); switch (error) { case E2BIG: return "E2BIG"; case EACCES: return "EACCES"; case EADDRINUSE: return "EADDRINUSE"; case EADDRNOTAVAIL: return "EADDRNOTAVAIL"; case EAFNOSUPPORT: return "EAFNOSUPPORT"; case EAGAIN: return "EAGAIN"; case EALREADY: return "EALREADY"; case EBADF: return "EBADF"; case EBADMSG: return "EBADMSG"; case EBUSY: return "EBUSY"; case ECANCELED: return "ECANCELED"; case ECHILD: return "ECHILD"; case ECOMM: return "ECOMM"; case ECONNABORTED: return 
"ECONNABORTED"; case ECONNREFUSED: return "ECONNREFUSED"; case ECONNRESET: return "ECONNRESET"; /* case EDEADLK: return "EDEADLK"; */ case EDESTADDRREQ: return "EDESTADDRREQ"; case EDOM: return "EDOM"; case EDQUOT: return "EDQUOT"; case EEXIST: return "EEXIST"; case EFAULT: return "EFAULT"; case EFBIG: return "EFBIG"; case EHOSTDOWN: return "EHOSTDOWN"; case EHOSTUNREACH: return "EHOSTUNREACH"; case EIDRM: return "EIDRM"; case EILSEQ: return "EILSEQ"; case EINPROGRESS: return "EINPROGRESS"; case EINTR: return "EINTR"; case EINVAL: return "EINVAL"; case EIO: return "EIO"; case EISCONN: return "EISCONN"; case EISDIR: return "EISDIR"; case ELIBACC: return "ELIBACC"; case ELOOP: return "ELOOP"; case EMFILE: return "EMFILE"; case EMLINK: return "EMLINK"; case EMSGSIZE: return "EMSGSIZE"; case EMULTIHOP: return "EMULTIHOP"; case ENAMETOOLONG: return "ENAMETOOLONG"; case ENETDOWN: return "ENETDOWN"; case ENETRESET: return "ENETRESET"; case ENETUNREACH: return "ENETUNREACH"; case ENFILE: return "ENFILE"; case ENOBUFS: return "ENOBUFS"; case ENODATA: return "ENODATA"; case ENODEV: return "ENODEV"; case ENOENT: return "ENOENT"; case ENOEXEC: return "ENOEXEC"; case ENOKEY: return "ENOKEY"; case ENOLCK: return "ENOLCK"; case ENOLINK: return "ENOLINK"; case ENOMEM: return "ENOMEM"; case ENOMSG: return "ENOMSG"; case ENOPROTOOPT: return "ENOPROTOOPT"; case ENOSPC: return "ENOSPC"; case ENOSR: return "ENOSR"; case ENOSTR: return "ENOSTR"; case ENOSYS: return "ENOSYS"; case ENOTBLK: return "ENOTBLK"; case ENOTCONN: return "ENOTCONN"; case ENOTDIR: return "ENOTDIR"; case ENOTEMPTY: return "ENOTEMPTY"; case ENOTSOCK: return "ENOTSOCK"; /* case ENOTSUP: return "ENOTSUP"; */ case ENOTTY: return "ENOTTY"; case ENOTUNIQ: return "ENOTUNIQ"; case ENXIO: return "ENXIO"; case EOPNOTSUPP: return "EOPNOTSUPP"; case EOVERFLOW: return "EOVERFLOW"; case EPERM: return "EPERM"; case EPFNOSUPPORT: return "EPFNOSUPPORT"; case EPIPE: return "EPIPE"; case EPROTO: return "EPROTO"; case EPROTONOSUPPORT: return "EPROTONOSUPPORT"; case EPROTOTYPE: return "EPROTOTYPE"; case ERANGE: return "ERANGE"; case EREMOTE: return "EREMOTE"; case EREMOTEIO: return "EREMOTEIO"; case EROFS: return "EROFS"; case ESHUTDOWN: return "ESHUTDOWN"; case ESPIPE: return "ESPIPE"; case ESOCKTNOSUPPORT: return "ESOCKTNOSUPPORT"; case ESRCH: return "ESRCH"; case ESTALE: return "ESTALE"; case ETIME: return "ETIME"; case ETIMEDOUT: return "ETIMEDOUT"; case ETXTBSY: return "ETXTBSY"; case EUNATCH: return "EUNATCH"; case EUSERS: return "EUSERS"; /* case EWOULDBLOCK: return "EWOULDBLOCK"; */ case EXDEV: return "EXDEV"; #ifdef EBADE /* Not available on OSX */ case EBADE: return "EBADE"; case EBADFD: return "EBADFD"; case EBADSLT: return "EBADSLT"; case EDEADLOCK: return "EDEADLOCK"; case EBADR: return "EBADR"; case EBADRQC: return "EBADRQC"; case ECHRNG: return "ECHRNG"; #ifdef EISNAM /* Not available on Illumos/Solaris */ case EISNAM: return "EISNAM"; case EKEYEXPIRED: return "EKEYEXPIRED"; case EKEYREJECTED: return "EKEYREJECTED"; case EKEYREVOKED: return "EKEYREVOKED"; #endif case EL2HLT: return "EL2HLT"; case EL2NSYNC: return "EL2NSYNC"; case EL3HLT: return "EL3HLT"; case EL3RST: return "EL3RST"; case ELIBBAD: return "ELIBBAD"; case ELIBMAX: return "ELIBMAX"; case ELIBSCN: return "ELIBSCN"; case ELIBEXEC: return "ELIBEXEC"; #ifdef ENOMEDIUM /* Not available on Illumos/Solaris */ case ENOMEDIUM: return "ENOMEDIUM"; case EMEDIUMTYPE: return "EMEDIUMTYPE"; #endif case ENONET: return "ENONET"; case ENOPKG: return "ENOPKG"; case EREMCHG: return "EREMCHG"; case 
ERESTART: return "ERESTART"; case ESTRPIPE: return "ESTRPIPE"; #ifdef EUCLEAN /* Not available on Illumos/Solaris */ case EUCLEAN: return "EUCLEAN"; #endif case EXFULL: return "EXFULL"; #endif case pcmk_err_generic: return "pcmk_err_generic"; case pcmk_err_no_quorum: return "pcmk_err_no_quorum"; case pcmk_err_schema_validation: return "pcmk_err_schema_validation"; case pcmk_err_transform_failed: return "pcmk_err_transform_failed"; case pcmk_err_old_data: return "pcmk_err_old_data"; case pcmk_err_diff_failed: return "pcmk_err_diff_failed"; case pcmk_err_diff_resync: return "pcmk_err_diff_resync"; case pcmk_err_cib_modified: return "pcmk_err_cib_modified"; case pcmk_err_cib_backup: return "pcmk_err_cib_backup"; case pcmk_err_cib_save: return "pcmk_err_cib_save"; case pcmk_err_cib_corrupt: return "pcmk_err_cib_corrupt"; } return "Unknown"; } const char * pcmk_strerror(int rc) { int error = abs(rc); if (error == 0) { return "OK"; } else if (error < PCMK_ERROR_OFFSET) { return strerror(error); } switch (error) { case pcmk_err_generic: return "Generic Pacemaker error"; case pcmk_err_no_quorum: return "Operation requires quorum"; case pcmk_err_schema_validation: return "Update does not conform to the configured schema"; case pcmk_err_transform_failed: return "Schema transform failed"; case pcmk_err_old_data: return "Update was older than existing configuration"; case pcmk_err_diff_failed: return "Application of an update diff failed"; case pcmk_err_diff_resync: return "Application of an update diff failed, requesting a full refresh"; case pcmk_err_cib_modified: return "The on-disk configuration was manually modified"; case pcmk_err_cib_backup: return "Could not archive the previous configuration"; case pcmk_err_cib_save: return "Could not save the new configuration to disk"; case pcmk_err_cib_corrupt: return "Could not parse on-disk configuration"; case pcmk_err_schema_unchanged: return "Schema is already the latest available"; /* The following cases will only be hit on systems for which they are non-standard */ /* coverity[dead_error_condition] False positive on non-Linux */ case ENOTUNIQ: return "Name not unique on network"; /* coverity[dead_error_condition] False positive on non-Linux */ case ECOMM: return "Communication error on send"; /* coverity[dead_error_condition] False positive on non-Linux */ case ELIBACC: return "Can not access a needed shared library"; /* coverity[dead_error_condition] False positive on non-Linux */ case EREMOTEIO: return "Remote I/O error"; /* coverity[dead_error_condition] False positive on non-Linux */ case EUNATCH: return "Protocol driver not attached"; /* coverity[dead_error_condition] False positive on non-Linux */ case ENOKEY: return "Required key not available"; } crm_err("Unknown error code: %d", rc); return "Unknown error"; } const char * bz2_strerror(int rc) { /* http://www.bzip.org/1.0.3/html/err-handling.html */ switch (rc) { case BZ_OK: case BZ_RUN_OK: case BZ_FLUSH_OK: case BZ_FINISH_OK: case BZ_STREAM_END: return "Ok"; case BZ_CONFIG_ERROR: return "libbz2 has been improperly compiled on your platform"; case BZ_SEQUENCE_ERROR: return "library functions called in the wrong order"; case BZ_PARAM_ERROR: return "parameter is out of range or otherwise incorrect"; case BZ_MEM_ERROR: return "memory allocation failed"; case BZ_DATA_ERROR: return "data integrity error is detected during decompression"; case BZ_DATA_ERROR_MAGIC: return "the compressed stream does not start with the correct magic bytes"; case BZ_IO_ERROR: return "error reading or writing in the 
compressed file"; case BZ_UNEXPECTED_EOF: return "compressed file finishes before the logical end of stream is detected"; case BZ_OUTBUFF_FULL: return "output data will not fit into the buffer provided"; } return "Unknown error"; } void crm_log_output_fn(const char *file, const char *function, int line, int level, const char *prefix, const char *output) { const char *next = NULL; const char *offset = NULL; if (output == NULL) { level = LOG_DEBUG; output = "-- empty --"; } next = output; do { offset = next; next = strchrnul(offset, '\n'); do_crm_log_alias(level, file, function, line, "%s [ %.*s ]", prefix, (int)(next - offset), offset); if (next[0] != 0) { next++; } } while (next != NULL && next[0] != 0); } char * crm_strdup_printf (char const *format, ...) { va_list ap; int len = 0; char *string = NULL; va_start(ap, format); len = vasprintf (&string, format, ap); CRM_ASSERT(len > 0); va_end(ap); return string; } diff --git a/lib/pengine/container.c b/lib/pengine/container.c index 7e99e68c3e..e05287990f 100644 --- a/lib/pengine/container.c +++ b/lib/pengine/container.c @@ -1,900 +1,900 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #define VARIANT_CONTAINER 1 #include "./variant.h" void tuple_free(container_grouping_t *tuple); static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static int allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max) { if(data->ip_range_start == NULL) { return 0; } else if(data->ip_last) { tuple->ipaddr = next_ip(data->ip_last); } else { tuple->ipaddr = strdup(data->ip_range_start); } data->ip_last = tuple->ipaddr; #if 0 return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d", data->prefix, tuple->offset, tuple->ipaddr, data->prefix, tuple->offset, data->prefix, tuple->offset); #else return snprintf(buffer, max, " --add-host=%s-%d:%s", data->prefix, tuple->offset, tuple->ipaddr); #endif } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, name); crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, "ocf"); crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider); crm_xml_add(rsc, XML_ATTR_TYPE, kind); return rsc; } static void create_nvp(xmlNode *parent, const char *name, const char *value) { xmlNode *xml_nvp = create_xml_node(parent, XML_CIB_TAG_NVPAIR); 
crm_xml_set_id(xml_nvp, "%s-%s", ID(parent), name); crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_VALUE, value); } static void create_op(xmlNode *parent, const char *prefix, const char *task, const char *interval) { xmlNode *xml_op = create_xml_node(parent, "op"); crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval); crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval); crm_xml_add(xml_op, "name", task); } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. */ static bool valid_network(container_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->replicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->replicas_per_host = 1; /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */ } return TRUE; } return FALSE; } static bool create_ip_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "ip", tuple->ipaddr); if(data->host_network) { create_nvp(xml_obj, "nic", data->host_network); } if(data->host_netmask) { create_nvp(xml_obj, "cidr_netmask", data->host_netmask); } else { create_nvp(xml_obj, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); create_op(xml_obj, ID(xml_ip), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? 
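/* For illustration only (approximate; the real element and attribute names
 * come from the XML_* constants used above): the implicit IP resource built
 * here looks roughly like
 *
 *   <primitive id="PREFIX-ip-ADDR" class="ocf" provider="heartbeat" type="IPaddr2">
 *     <instance_attributes id="PREFIX-attributes-OFFSET">
 *       <nvpair name="ip" value="ADDR"/>
 *       <nvpair name="cidr_netmask" value="32"/>
 *     </instance_attributes>
 *     <operations>
 *       <op name="monitor" interval="60s"/>
 *     </operations>
 *   </primitive>
 *
 * and is unpacked below like any user-defined resource.
 */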
if (common_unpack(xml_ip, &tuple->ip, NULL, data_set) == false) { return FALSE; } parent->children = g_list_append(parent->children, tuple->ip); } return TRUE; } static bool create_docker_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_docker = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset); crm_xml_sanitize_id(id); xml_docker = create_resource(id, "heartbeat", "docker"); free(id); xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "image", data->image); create_nvp(xml_obj, "allow_pull", "true"); create_nvp(xml_obj, "force_kill", "false"); create_nvp(xml_obj, "reuse", "false"); offset += snprintf(buffer+offset, max-offset, "-h %s-%d --restart=no ", data->prefix, tuple->offset); if(data->docker_network) { // offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr); offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { container_mount_t *mount = pIter->data; if(mount->flags) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, tuple->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target); } if(mount->options) { offset += snprintf(buffer+offset, max-offset, ":%s", mount->options); } } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { container_port_t *port = pIter->data; if(tuple->ipaddr) { offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s", tuple->ipaddr, port->source, port->target); } else { offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target); } } if(data->docker_run_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options); } if(data->docker_host_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options); } create_nvp(xml_obj, "run_opts", buffer); free(buffer); create_nvp(xml_obj, "mount_points", dbuffer); free(dbuffer); if(tuple->child) { char *command = NULL; if(data->control_port) { command = crm_strdup_printf(SBIN_DIR"/pacemaker_remoted -p %s", data->control_port); } else { command = crm_strdup_printf(SBIN_DIR"/pacemaker_remoted -p %d", DEFAULT_REMOTE_PORT); } create_nvp(xml_obj, "run_cmd", command); free(command); /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independantly */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. 
* * Arguably better to control API access this with ACLs like * "normal" remote nodes * * create_nvp(xml_obj, "run_cmd", "/usr/libexec/pacemaker/lrmd"); * create_nvp(xml_obj, "monitor_cmd", "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_docker, "operations"); create_op(xml_obj, ID(xml_docker), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? if (common_unpack(xml_docker, &tuple->docker, NULL, data_set) == FALSE) { return FALSE; } parent->children = g_list_append(parent->children, tuple->docker); tuple->docker->parent = parent; return TRUE; } static bool create_remote_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if (tuple->child && valid_network(data)) { node_t *node = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_remote = NULL; char *nodeid = crm_strdup_printf("%s-%d", data->prefix, tuple->offset); char *id = NULL; if (remote_id_conflict(nodeid, data_set)) { // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset); CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE); } else { id = strdup(nodeid); } xml_remote = create_resource(id, "pacemaker", "remote"); free(id); xml_obj = create_xml_node(xml_remote, "operations"); create_op(xml_obj, ID(xml_remote), "monitor", "60s"); xml_obj = create_xml_node(xml_remote, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); if(tuple->ipaddr) { create_nvp(xml_obj, "addr", tuple->ipaddr); } else { create_nvp(xml_obj, "addr", "localhost"); } if(data->control_port) { create_nvp(xml_obj, "port", data->control_port); } else { create_nvp(xml_obj, "port", crm_itoa(DEFAULT_REMOTE_PORT)); } xml_obj = create_xml_node(xml_remote, XML_TAG_META_SETS); crm_xml_set_id(xml_obj, "%s-meta-%d", data->prefix, tuple->offset); create_nvp(xml_obj, XML_OP_ATTR_ALLOW_MIGRATE, "false"); // Sets up node->details->remote_rsc->container == tuple->docker create_nvp(xml_obj, XML_RSC_ATTR_CONTAINER, tuple->docker->id); // TODO: Do this generically, eg with rsc->flags // create_nvp(xml_obj, XML_RSC_ATTR_INTERNAL_RSC, "true"); // Suppress printing // tuple->docker->fillers = g_list_append(tuple->docker->fillers, child); // -INFINITY prevents anyone else from running here node = pe_create_node(strdup(nodeid), nodeid, "remote", "-INFINITY", data_set); tuple->node = node_copy(node); tuple->node->weight = 500; nodeid = NULL; id = NULL; if (common_unpack(xml_remote, &tuple->remote, NULL, data_set) == FALSE) { return FALSE; } tuple->node->details->remote_rsc = tuple->remote; parent->children = g_list_append(parent->children, tuple->remote); } return TRUE; } static bool create_container( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(create_docker_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_ip_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_remote_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(tuple->child && tuple->ipaddr) { add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr); } return FALSE; } static void mount_free(container_mount_t *mount) { free(mount->source); free(mount->target); 
free(mount->options); free(mount); } static void port_free(container_port_t *port) { free(port->source); free(port->target); free(port); } gboolean container_unpack(resource_t * rsc, pe_working_set_t * data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; container_variant_data_t *container_data = NULL; pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); container_data = calloc(1, sizeof(container_variant_data_t)); rsc->variant_opaque = container_data; container_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, "docker"); if(xml_obj == NULL) { return FALSE; } value = crm_element_value(xml_obj, "masters"); container_data->masters = crm_parse_int(value, "0"); if (container_data->masters < 0) { pe_err("'masters' for %s must be nonnegative integer, using 0", rsc->id); container_data->masters = 0; } value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && (container_data->masters > 0)) { container_data->replicas = container_data->masters; } else { container_data->replicas = crm_parse_int(value, "1"); } if (container_data->replicas < 1) { pe_err("'replicas' for %s must be positive integer, using 1", rsc->id); container_data->replicas = 1; } /* * Communication between containers on the same host via the * floating IPs only works if docker is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); container_data->replicas_per_host = crm_parse_int(value, "1"); if (container_data->replicas_per_host < 1) { pe_err("'replicas-per-host' for %s must be positive integer, using 1", rsc->id); container_data->replicas_per_host = 1; } if (container_data->replicas_per_host == 1) { clear_bit(rsc->flags, pe_rsc_unique); } container_data->docker_run_options = crm_element_value_copy(xml_obj, "options"); container_data->image = crm_element_value_copy(xml_obj, "image"); container_data->docker_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); container_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); container_data->control_port = crm_element_value_copy(xml_obj, "control-port"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_port_t *port = calloc(1, sizeof(container_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } container_data->ports = g_list_append(container_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_mount_t *mount = calloc(1, sizeof(container_mount_t)); mount->source = crm_element_value_copy(xml_child, "source-dir"); if(mount->source == NULL) { mount->source = crm_element_value_copy(xml_child, "source-dir-root"); mount->flags = 1; } mount->target = crm_element_value_copy(xml_child, "target-dir"); 
mount->options = crm_element_value_copy(xml_child, "options"); if(mount->source && mount->target) { container_data->mounts = g_list_append(container_data->mounts, mount); } else { pe_err("Invalid mount directive %s", ID(xml_child)); mount_free(mount); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(container_data)) { char *value = NULL; xmlNode *xml_set = NULL; if(container_data->masters > 0) { xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER); } else { xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); } crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); - crm_xml_set_id(xml_resource, "%s-%s-meta", container_data->prefix, xml_resource->name); + crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name); create_nvp(xml_set, XML_RSC_ATTR_ORDERED, "true"); value = crm_itoa(container_data->replicas); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = crm_itoa(container_data->replicas_per_host); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); if(container_data->replicas_per_host > 1) { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "true"); } else { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "false"); } if(container_data->masters) { value = crm_itoa(container_data->masters); create_nvp(xml_set, XML_RSC_ATTR_MASTER_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GListPtr childIter = NULL; resource_t *new_rsc = NULL; container_mount_t *mount = NULL; container_port_t *port = calloc(1, sizeof(container_port_t)); int offset = 0, max = 1024; char *buffer = calloc(1, max+1); mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->target = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->options = NULL; mount->flags = 0; container_data->mounts = g_list_append(container_data->mounts, mount); mount = calloc(1, sizeof(container_mount_t)); - mount->source = strdup("/var/log/containers"); + mount->source = strdup(CRM_LOG_DIR "/bundles"); mount->target = strdup("/var/log"); mount->options = NULL; mount->flags = 1; container_data->mounts = g_list_append(container_data->mounts, mount); if(container_data->control_port) { port->source = strdup(container_data->control_port); } else { port->source = crm_itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); container_data->ports = g_list_append(container_data->ports, port); if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", crm_element_value(rsc->xml, XML_ATTR_ID)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } return FALSE; } container_data->child = new_rsc; container_data->child->orig_xml = xml_obj; // Also the trigger for common_free() // to free xml_resource as container_data->child->xml for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->child = childIter->data; tuple->offset = lpc++; offset += allocate_ip(container_data, tuple, buffer+offset, max-offset); container_data->tuples = g_list_append(container_data->tuples, tuple); } 
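/* At this point (the clone/master branch) there is one container_grouping_t
 * per replica of the inner resource, each with its own offset and, when
 * ip-range-start was given, a sequentially allocated address.  The
 * " --add-host=..." arguments accumulated in 'buffer' by allocate_ip()
 * become the docker_host_options shared by every replica below.
 */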
container_data->docker_host_options = buffer; } else { // Just a naked container, no pacemaker-remote int offset = 0, max = 1024; char *buffer = calloc(1, max+1); for(int lpc = 0; lpc < container_data->replicas; lpc++) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->offset = lpc; offset += allocate_ip(container_data, tuple, buffer+offset, max-offset); container_data->tuples = g_list_append(container_data->tuples, tuple); } container_data->docker_host_options = buffer; } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; // TODO: Remove from list if create_container() returns TRUE create_container(rsc, container_data, tuple, data_set); } if(container_data->child) { rsc->children = g_list_append(rsc->children, container_data->child); } return TRUE; } gboolean container_active(resource_t * rsc, gboolean all) { return TRUE; } resource_t * find_container_child(const char *stem, resource_t * rsc, node_t *node) { container_variant_data_t *container_data = NULL; resource_t *parent = uber_parent(rsc); CRM_ASSERT(parent->parent); parent = parent->parent; get_container_variant_data(container_data, parent); if (is_not_set(rsc->flags, pe_rsc_unique)) { for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->node->details == node->details) { rsc = tuple->child; break; } } } if (rsc && safe_str_neq(stem, rsc->id)) { free(rsc->clone_name); rsc->clone_name = strdup(stem); } return rsc; } static void container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { container_variant_data_t *container_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_concat(pre_text, " ", ' '); status_print("%sid); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print(">\n"); get_container_variant_data(container_data, rsc); status_print("%sDocker container: %s [%s]%s%s", pre_text, rsc->id, container_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? 
"" : " (unmanaged)"); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->ip) { tuple->ip->fns->print(tuple->ip, child_text, options, print_data); } if(tuple->child) { tuple->child->fns->print(tuple->child, child_text, options, print_data); } if(tuple->docker) { tuple->docker->fns->print(tuple->docker, child_text, options, print_data); } if(tuple->remote) { tuple->remote->fns->print(tuple->remote, child_text, options, print_data); } } status_print("%s\n", pre_text); free(child_text); } static void tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data) { node_t *node = NULL; resource_t *rsc = tuple->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = tuple->docker; } if(tuple->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker)); } if(tuple->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr); } if(tuple->remote && tuple->remote->running_on != NULL) { node = tuple->remote->running_on->data; } else if (tuple->remote == NULL && rsc->running_on != NULL) { node = rsc->running_on->data; } common_print(rsc, pre_text, buffer, node, options, print_data); } void container_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { container_variant_data_t *container_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { container_print_xml(rsc, pre_text, options, print_data); return; } get_container_variant_data(container_data, rsc); if (pre_text == NULL) { pre_text = " "; } child_text = crm_strdup_printf(" %s", pre_text); status_print("%sDocker container%s: %s [%s]%s%s\n", pre_text, container_data->replicas>1?" set":"", rsc->id, container_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(is_set(options, pe_print_clone_details)) { if(g_list_length(container_data->tuples) > 1) { status_print(" %sReplica[%d]\n", pre_text, tuple->offset); } if(tuple->ip) { tuple->ip->fns->print(tuple->ip, child_text, options, print_data); } if(tuple->docker) { tuple->docker->fns->print(tuple->docker, child_text, options, print_data); } if(tuple->remote) { tuple->remote->fns->print(tuple->remote, child_text, options, print_data); } if(tuple->child) { tuple->child->fns->print(tuple->child, child_text, options, print_data); } } else { char *child_text = crm_strdup_printf("%s ", pre_text); tuple_print(tuple, child_text, options, print_data); } } } void tuple_free(container_grouping_t *tuple) { if(tuple == NULL) { return; } // TODO: Free tuple->node ? 
if(tuple->ip) { tuple->ip->fns->free(tuple->ip); tuple->ip = NULL; } if(tuple->child) { tuple->child->fns->free(tuple->child); tuple->child = NULL; } if(tuple->docker) { tuple->docker->fns->free(tuple->docker); tuple->docker = NULL; } if(tuple->remote) { tuple->remote->fns->free(tuple->remote); tuple->remote = NULL; } free(tuple->ipaddr); free(tuple); } void container_free(resource_t * rsc) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); free(container_data->prefix); free(container_data->image); free(container_data->control_port); free(container_data->host_network); free(container_data->host_netmask); free(container_data->ip_range_start); free(container_data->docker_network); free(container_data->docker_run_options); free(container_data->docker_host_options); g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free); g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(container_data->ports, (GDestroyNotify)port_free); common_free(rsc); } enum rsc_role_e container_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e container_role = RSC_ROLE_UNKNOWN; return container_role; } diff --git a/mcp/corosync.c b/mcp/corosync.c index 975b93c410..7502da7643 100644 --- a/mcp/corosync.c +++ b/mcp/corosync.c @@ -1,483 +1,490 @@ /* * Copyright (C) 2010 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include /* for calls to stat() */ #include /* For basename() and dirname() */ #include #include /* For getpwname() */ #include #include #include #if HAVE_CONFDB # include #endif #include #include #if SUPPORT_CMAN # include #endif #if HAVE_CMAP # include #endif enum cluster_type_e stack = pcmk_cluster_unknown; static corosync_cfg_handle_t cfg_handle; /* =::=::=::= CFG - Shutdown stuff =::=::=::= */ static void cfg_shutdown_callback(corosync_cfg_handle_t h, corosync_cfg_shutdown_flags_t flags) { crm_info("Corosync wants to shut down: %s", (flags == COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE) ? "immediate" : (flags == COROSYNC_CFG_SHUTDOWN_FLAG_REGARDLESS) ? 
"forced" : "optional"); /* Never allow corosync to shut down while we're running */ corosync_cfg_replyto_shutdown(h, COROSYNC_CFG_SHUTDOWN_FLAG_NO); } static corosync_cfg_callbacks_t cfg_callbacks = { .corosync_cfg_shutdown_callback = cfg_shutdown_callback, }; static int pcmk_cfg_dispatch(gpointer user_data) { corosync_cfg_handle_t *handle = (corosync_cfg_handle_t *) user_data; cs_error_t rc = corosync_cfg_dispatch(*handle, CS_DISPATCH_ALL); if (rc != CS_OK) { return -1; } return 0; } static void cfg_connection_destroy(gpointer user_data) { crm_err("Connection destroyed"); cfg_handle = 0; pcmk_shutdown(SIGTERM); } gboolean cluster_disconnect_cfg(void) { if (cfg_handle) { corosync_cfg_finalize(cfg_handle); cfg_handle = 0; } pcmk_shutdown(SIGTERM); return TRUE; } #define cs_repeat(counter, max, code) do { \ code; \ if(rc == CS_ERR_TRY_AGAIN || rc == CS_ERR_QUEUE_FULL) { \ counter++; \ crm_debug("Retrying operation after %ds", counter); \ sleep(counter); \ } else { \ break; \ } \ } while(counter < max) gboolean cluster_connect_cfg(uint32_t * nodeid) { cs_error_t rc; int fd = 0, retries = 0; static struct mainloop_fd_callbacks cfg_fd_callbacks = { .dispatch = pcmk_cfg_dispatch, .destroy = cfg_connection_destroy, }; cs_repeat(retries, 30, rc = corosync_cfg_initialize(&cfg_handle, &cfg_callbacks)); if (rc != CS_OK) { crm_err("corosync cfg init error %d", rc); return FALSE; } rc = corosync_cfg_fd_get(cfg_handle, &fd); if (rc != CS_OK) { crm_err("corosync cfg fd_get error %d", rc); goto bail; } retries = 0; cs_repeat(retries, 30, rc = corosync_cfg_local_get(cfg_handle, nodeid)); if (rc != CS_OK) { crm_err("corosync cfg local_get error %d", rc); goto bail; } crm_debug("Our nodeid: %d", *nodeid); mainloop_add_fd("corosync-cfg", G_PRIORITY_DEFAULT, fd, &cfg_handle, &cfg_fd_callbacks); return TRUE; bail: corosync_cfg_finalize(cfg_handle); return FALSE; } /* =::=::=::= Configuration =::=::=::= */ #if HAVE_CONFDB static int get_config_opt(confdb_handle_t config, hdb_handle_t object_handle, const char *key, char **value, const char *fallback) { size_t len = 0; char *env_key = NULL; const char *env_value = NULL; char buffer[256]; if (*value) { free(*value); *value = NULL; } if (object_handle > 0) { if (CS_OK == confdb_key_get(config, object_handle, key, strlen(key), &buffer, &len)) { *value = strdup(buffer); } } if (*value) { crm_info("Found '%s' for option: %s", *value, key); return 0; } env_key = crm_concat("HA", key, '_'); env_value = getenv(env_key); free(env_key); if (*value) { crm_info("Found '%s' in ENV for option: %s", *value, key); *value = strdup(env_value); return 0; } if (fallback) { crm_info("Defaulting to '%s' for option: %s", fallback, key); *value = strdup(fallback); } else { crm_info("No default for option: %s", key); } return -1; } static confdb_handle_t config_find_init(confdb_handle_t config) { cs_error_t rc = CS_OK; confdb_handle_t local_handle = OBJECT_PARENT_HANDLE; rc = confdb_object_find_start(config, local_handle); if (rc == CS_OK) { return local_handle; } else { crm_err("Couldn't create search context: %d", rc); } return 0; } static hdb_handle_t config_find_next(confdb_handle_t config, const char *name, confdb_handle_t top_handle) { cs_error_t rc = CS_OK; hdb_handle_t local_handle = 0; if (top_handle == 0) { crm_err("Couldn't search for %s: no valid context", name); return 0; } crm_trace("Searching for %s in " HDB_X_FORMAT, name, top_handle); rc = confdb_object_find(config, top_handle, name, strlen(name), &local_handle); if (rc != CS_OK) { crm_info("No additional configuration 
/* =::=::=::= Configuration =::=::=::= */
#if HAVE_CONFDB
static int
get_config_opt(confdb_handle_t config,
               hdb_handle_t object_handle, const char *key, char **value, const char *fallback)
{
    size_t len = 0;
    char *env_key = NULL;
    const char *env_value = NULL;
    char buffer[256];

    if (*value) {
        free(*value);
        *value = NULL;
    }

    if (object_handle > 0) {
        if (CS_OK == confdb_key_get(config, object_handle, key, strlen(key), &buffer, &len)) {
            *value = strdup(buffer);
        }
    }

    if (*value) {
        crm_info("Found '%s' for option: %s", *value, key);
        return 0;
    }

    env_key = crm_concat("HA", key, '_');
    env_value = getenv(env_key);
    free(env_key);

    if (env_value) {
        crm_info("Found '%s' in ENV for option: %s", env_value, key);
        *value = strdup(env_value);
        return 0;
    }

    if (fallback) {
        crm_info("Defaulting to '%s' for option: %s", fallback, key);
        *value = strdup(fallback);

    } else {
        crm_info("No default for option: %s", key);
    }

    return -1;
}

static confdb_handle_t
config_find_init(confdb_handle_t config)
{
    cs_error_t rc = CS_OK;
    confdb_handle_t local_handle = OBJECT_PARENT_HANDLE;

    rc = confdb_object_find_start(config, local_handle);
    if (rc == CS_OK) {
        return local_handle;
    } else {
        crm_err("Couldn't create search context: %d", rc);
    }
    return 0;
}

static hdb_handle_t
config_find_next(confdb_handle_t config, const char *name, confdb_handle_t top_handle)
{
    cs_error_t rc = CS_OK;
    hdb_handle_t local_handle = 0;

    if (top_handle == 0) {
        crm_err("Couldn't search for %s: no valid context", name);
        return 0;
    }

    crm_trace("Searching for %s in " HDB_X_FORMAT, name, top_handle);
    rc = confdb_object_find(config, top_handle, name, strlen(name), &local_handle);
    if (rc != CS_OK) {
        crm_info("No additional configuration supplied for: %s", name);
        local_handle = 0;
    } else {
        crm_info("Processing additional %s options...", name);
    }
    return local_handle;
}

#else
static int
get_config_opt(uint64_t unused, cmap_handle_t object_handle, const char *key, char **value,
               const char *fallback)
{
    int rc = 0, retries = 0;

    cs_repeat(retries, 5, rc = cmap_get_string(object_handle, key, value));
    if (rc != CS_OK) {
        crm_trace("Search for %s failed %d, defaulting to %s", key, rc, fallback);
        if (fallback) {
            *value = strdup(fallback);
        } else {
            *value = NULL;
        }
    }
    crm_trace("%s: %s", key, *value);
    return rc;
}

#endif

#if HAVE_CONFDB
#  define KEY_PREFIX ""
#elif HAVE_CMAP
#  define KEY_PREFIX "logging."
#endif

gboolean
mcp_read_config(void)
{
    int rc = CS_OK;
    int retries = 0;
    const char *const_value = NULL;

#if HAVE_CONFDB
    char *value = NULL;
    confdb_handle_t config = 0;
    confdb_handle_t top_handle = 0;
    hdb_handle_t local_handle;
    static confdb_callbacks_t callbacks = { };

    do {
        rc = confdb_initialize(&config, &callbacks);
        if (rc != CS_OK) {
            retries++;
            printf("confdb connection setup failed: %s. Retrying in %ds\n", ais_error2text(rc), retries);
            crm_info("confdb connection setup failed: %s. Retrying in %ds", ais_error2text(rc), retries);
            sleep(retries);

        } else {
            break;
        }

    } while (retries < 5);

#elif HAVE_CMAP
    cmap_handle_t local_handle;
    uint64_t config = 0;

    /* There can be only one (possibility if confdb isn't around) */
    do {
        rc = cmap_initialize(&local_handle);
        if (rc != CS_OK) {
            retries++;
            printf("cmap connection setup failed: %s. Retrying in %ds\n", cs_strerror(rc), retries);
            crm_info("cmap connection setup failed: %s. Retrying in %ds", cs_strerror(rc), retries);
            sleep(retries);

        } else {
            break;
        }

    } while (retries < 5);
#endif

    if (rc != CS_OK) {
        printf("Could not connect to Cluster Configuration Database API, error %d\n", rc);
        crm_warn("Could not connect to Cluster Configuration Database API, error %d", rc);
        return FALSE;
    }

    stack = get_cluster_type();
    crm_info("Reading configuration for stack: %s", name_for_cluster_type(stack));

    /* =::=::= Should we be here =::=::= */
    if (stack == pcmk_cluster_corosync) {
        set_daemon_option("cluster_type", "corosync");
        set_daemon_option("quorum_type", "corosync");

#if HAVE_CONFDB
    } else if (stack == pcmk_cluster_cman) {
        set_daemon_option("cluster_type", "cman");
        set_daemon_option("quorum_type", "cman");
        enable_crmd_as_root(TRUE);

    } else if (stack == pcmk_cluster_classic_ais) {
        set_daemon_option("cluster_type", "openais");
        set_daemon_option("quorum_type", "pcmk");

        /* Look for a service block to indicate our plugin is loaded */
        top_handle = config_find_init(config);
        local_handle = config_find_next(config, "service", top_handle);

        while (local_handle) {
            get_config_opt(config, local_handle, "name", &value, NULL);
            if (safe_str_eq("pacemaker", value)) {
                get_config_opt(config, local_handle, "ver", &value, "0");
                if (safe_str_eq(value, "1")) {
                    get_config_opt(config, local_handle, "use_logd", &value, "no");
                    set_daemon_option("use_logd", value);
                    set_daemon_option("LOGD", value);

                    get_config_opt(config, local_handle, "use_mgmtd", &value, "no");
                    enable_mgmtd(crm_is_true(value));

                } else {
                    crm_err("We can only start Pacemaker from init if using version 1"
                            " of the Pacemaker plugin for Corosync. Terminating.");
                    crm_exit(DAEMON_RESPAWN_STOP);
                }
                break;
            }
            local_handle = config_find_next(config, "service", top_handle);
        }
        free(value);
#endif

    } else {
        crm_err("Unsupported stack type: %s", name_for_cluster_type(stack));
        return FALSE;
    }

#if HAVE_CONFDB
    top_handle = config_find_init(config);
    local_handle = config_find_next(config, "logging", top_handle);
#endif

    /* =::=::= Logging =::=::= */
    if (daemon_option("debug")) {
        /* Syslog logging is already setup by crm_log_init() */

    } else {
        /* Check corosync */
        char *debug_enabled = NULL;

        get_config_opt(config, local_handle, KEY_PREFIX "debug", &debug_enabled, "off");

        if (crm_is_true(debug_enabled)) {
            set_daemon_option("debug", "1");
            if (get_crm_log_level() < LOG_DEBUG) {
                set_crm_log_level(LOG_DEBUG);
            }

        } else {
            set_daemon_option("debug", "0");
        }

        free(debug_enabled);
    }

+    /* If the user didn't explicitly configure a Pacemaker log file, check
+     * whether they configured a heartbeat or corosync log file, and use that.
+     *
+     * @COMPAT This should all go away, and we should just rely on the logging
+     * set up by crm_log_init(). We aren't doing this yet because it is a
+     * significant user-visible change that will need to be publicized.
+     */
    const_value = daemon_option("debugfile");
    if (daemon_option("logfile")) {
        /* File logging is already setup by crm_log_init() */

    } else if (const_value) {
        /* From when we cared what options heartbeat used */
        set_daemon_option("logfile", const_value);
        crm_add_logfile(const_value);

    } else {
        /* Check corosync */
        char *logfile = NULL;
        char *logfile_enabled = NULL;

        get_config_opt(config, local_handle, KEY_PREFIX "to_logfile", &logfile_enabled, "on");
        get_config_opt(config, local_handle, KEY_PREFIX "logfile", &logfile, "/var/log/pacemaker.log");

        if (crm_is_true(logfile_enabled) == FALSE) {
            crm_trace("File logging disabled in corosync");

        } else if (crm_add_logfile(logfile)) {
            set_daemon_option("logfile", logfile);

        } else {
            crm_err("Couldn't create logfile: %s", logfile);
            set_daemon_option("logfile", "none");
        }

        free(logfile);
        free(logfile_enabled);
    }

    if (daemon_option("logfacility")) {
        /* Syslog logging is already setup by crm_log_init() */

    } else {
        /* Check corosync */
        char *syslog_enabled = NULL;
        char *syslog_facility = NULL;

        get_config_opt(config, local_handle, KEY_PREFIX "to_syslog", &syslog_enabled, "on");
        get_config_opt(config, local_handle, KEY_PREFIX "syslog_facility", &syslog_facility, "daemon");

        if (crm_is_true(syslog_enabled) == FALSE) {
            qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_FALSE);
            set_daemon_option("logfacility", "none");

        } else {
            qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_FACILITY, qb_log_facility2int(syslog_facility));
            qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_TRUE);
            set_daemon_option("logfacility", syslog_facility);
        }

        free(syslog_enabled);
        free(syslog_facility);
    }

    const_value = daemon_option("logfacility");
    if (const_value) {
        /* cluster-glue module needs HA_LOGFACILITY */
        setenv("HA_LOGFACILITY", const_value, 1);
    }

#if HAVE_CONFDB
    confdb_finalize(config);
#elif HAVE_CMAP
    if (local_handle) {
        gid_t gid = 0;

        if (crm_user_lookup(CRM_DAEMON_USER, NULL, &gid) < 0) {
            crm_warn("Could not authorize group with corosync " CRM_XS
                     " No group found for user %s", CRM_DAEMON_USER);

        } else {
            char key[PATH_MAX];

            snprintf(key, PATH_MAX, "uidgid.gid.%u", gid);
            rc = cmap_set_uint8(local_handle, key, 1);

            if (rc != CS_OK) {
                crm_warn("Could not authorize group with corosync " CRM_XS
                         " group=%u rc=%d (%s)", gid, rc, ais_error2text(rc));
            }
        }
    }
    cmap_finalize(local_handle);
#endif

    return TRUE;
}
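Taken together, the logging hunk above implements a simple precedence chain for the log file: an explicitly configured Pacemaker log file wins, then the legacy heartbeat-style debugfile setting, then corosync's logging.logfile, with /var/log/pacemaker.log as the shipped default. A rough standalone sketch of that selection order is below; the function and parameter names are illustrative stand-ins for the daemon_option()/get_config_opt() lookups, not Pacemaker APIs:

    #include <stdio.h>

    /* Illustrative only: each argument stands in for one source of the setting */
    static const char *
    pick_logfile(const char *pcmk_logfile,      /* explicit Pacemaker setting   */
                 const char *legacy_debugfile,  /* old heartbeat-style setting  */
                 const char *corosync_logfile)  /* corosync logging.logfile     */
    {
        if (pcmk_logfile) {
            return pcmk_logfile;              /* crm_log_init() already used it */
        } else if (legacy_debugfile) {
            return legacy_debugfile;          /* compatibility fallback         */
        } else if (corosync_logfile) {
            return corosync_logfile;          /* reuse the cluster stack's file */
        }
        return "/var/log/pacemaker.log";      /* final default                  */
    }

    int
    main(void)
    {
        /* e.g. nothing configured anywhere -> fall back to the default */
        printf("would log to: %s\n", pick_logfile(NULL, NULL, NULL));
        return 0;
    }

The real code additionally honours corosync's to_logfile switch and downgrades to "none" when crm_add_logfile() cannot create the chosen file; the sketch ignores both for brevity.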
diff --git a/valgrind-pcmk.suppressions b/valgrind-pcmk.suppressions
index a942f30248..48eec9e2af 100644
--- a/valgrind-pcmk.suppressions
+++ b/valgrind-pcmk.suppressions
@@ -1,315 +1,341 @@
# Valgrind suppressions for Pacemaker testing
{
   Valgrind bug
   Memcheck:Addr8
   fun:__strspn_sse42
   fun:crm_get_msec
}

{
   Bash leak
   Memcheck:Leak
   fun:malloc
   fun:xmalloc
   fun:*
   fun:yyparse
   fun:parse_command
   fun:read_command
   fun:reader_loop
   fun:main
}

{
   Another bash leak
   Memcheck:Leak
   fun:malloc
   fun:xmalloc
   fun:set_default_locale
   fun:main
}

+{
+   Bash leak in cli regression testing
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   fun:xmalloc
+   obj:/usr/bin/bash
+   fun:execute_command_internal
+   obj:/usr/bin/bash
+   fun:execute_command_internal
+   fun:parse_and_execute
+   fun:command_substitute
+   obj:/usr/bin/bash
+   obj:/usr/bin/bash
+   obj:/usr/bin/bash
+   fun:expand_string_assignment
+   obj:/usr/bin/bash
+   obj:/usr/bin/bash
+   obj:/usr/bin/bash
+   obj:/usr/bin/bash
+   fun:execute_command_internal
+   fun:execute_command_internal
+   fun:execute_command
+   fun:execute_command_internal
+}
+
{
   Ignore option parsing
   Memcheck:Leak
   fun:realloc
   fun:crm_get_option_long
   fun:main
}

{
   dlopen internals
   Memcheck:Leak
   fun:calloc
   fun:_dlerror_run
   fun:dlopen*
   fun:_log_so_walk_callback
   fun:dl_iterate_phdr
   fun:qb_log_init
}

{
   Cman - Points to uninitialized bytes
   Memcheck:Param
   socketcall.sendmsg(msg)
   fun:__sendmsg_nocancel
   obj:*/libcman.so.3.0
   obj:*/libcman.so.3.0
}

{
   Cman - Who cares if unused bytes are uninitialized
   Memcheck:Param
   sendmsg(msg)
   fun:__sendmsg_nocancel
   obj:*/libcman.so.3.0
   obj:*/libcman.so.3.0
}

{
   Cman - Jump or move depends on uninitialized values
   Memcheck:Cond
   obj:*/libcman.so.3.0
   obj:*/libcman.so.3.0
}

{
   quarks - hashtable
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libglib-*
   fun:g_slice_alloc
   fun:g_hash_table_new_full
   fun:g_quark_from_static_string
}

{
   quarks - hashtable 2
   Memcheck:Leak
   fun:malloc
   fun:g_malloc
   fun:g_slice_alloc
   fun:g_hash_table_new_full
   fun:g_quark_from_static_string
}

{
   quarks - hashtable 3
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   fun:g_hash_table_new_full
   fun:g_quark_from_static_string
}

{
   quarks - hashtable 4
   Memcheck:Leak
   fun:malloc
   fun:realloc
   fun:g_realloc
   fun:g_quark_from_static_string
}

{
   glib mainloop internals - default
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   fun:g_main_context_new
   fun:g_main_context_default
   fun:g_main_loop_new
   fun:main
}

{
   glib mainloop internals - default
   Memcheck:Leak
   fun:malloc
   fun:g_malloc
   fun:g_slice_alloc
   fun:*
   fun:g_main_context_new
   fun:g_main_context_default
   fun:g_main_loop_new
   fun:main
}

{
   glib mainloop internals - default
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libglib-2.*
   fun:g_slice_alloc
   fun:g_ptr_array_sized_new
   fun:g_main_context_new
   fun:g_main_context_default
   fun:g_main_loop_new
}

{
   glib mainloop internals - run
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   fun:g_thread_self
   fun:g_main_loop_run
}

{
   glib mainloop internals - run
   Memcheck:Leak
   fun:malloc
   fun:g_malloc
   obj:*/libglib-2.*
   fun:g_main_loop_run
}

{
   glib mainloop internals - run
   Memcheck:Leak
   fun:malloc
   fun:realloc
   fun:g_realloc
   obj:*/libglib-2.*
   fun:g_ptr_array_add
   fun:g_main_context_check
   obj:*/libglib-2.*
   fun:g_main_loop_run
}

{
   glib mainloop internals - run
   Memcheck:Leak
   fun:malloc
   fun:g_malloc
   fun:g_slice_alloc
   fun:g_slice_alloc0
   obj:*/libglib-2.*
   fun:g_main_context_dispatch
   obj:*/libglib-2.*
   fun:g_main_loop_run
}

{
   glib mainloop internals - run
   Memcheck:Leak
   fun:malloc
   fun:realloc
   fun:g_realloc
   obj:*/libglib-2.*
   fun:g_array_set_size
   fun:g_static_private_set
   obj:*/libglib-2.*
   fun:g_main_context_dispatch
   obj:*/libglib-2.*
   fun:g_main_loop_run
}

{
   glib mainloop internals - run
   Memcheck:Leak
   fun:malloc
   fun:g_malloc
   fun:g_slice_alloc
   fun:g_array_sized_new
   fun:g_static_private_set
   obj:*/libglib-2.*
   fun:g_main_context_dispatch
   obj:*/libglib-2.*
   fun:g_main_loop_run
}

{
   glib types
   Memcheck:Leak
   fun:malloc
   fun:realloc
   fun:g_realloc
   obj:*/libgobject-*
   fun:g_type_register_static
}

{
   glib types 2
   Memcheck:Leak
   fun:realloc
   fun:g_realloc
   obj:*/libgobject-*
   fun:g_type_register_static
   fun:g_param_type_register_static
}

{
   glib types 3
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libgobject-*
   fun:g_type_register_fundamental
}

{
   glib types 4
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libgobject-*
   obj:*/libgobject-*
   fun:g_type_register_fundamental
}

{
   glib - the return
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libgobject-*
   obj:*/libgobject-*
   fun:_dl_init
}

{
   glib - seriously?
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libgobject-*
   obj:*/libgobject-*
   obj:*/libgobject-*
   fun:_dl_init
}

{
   glib - this is not funny anymore
   Memcheck:Leak
   fun:malloc
   fun:realloc
   fun:g_realloc
   obj:*/libgobject-*
   fun:g_type_register_fundamental
}

{
   glib - why do you hate me?
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libgobject-*
   obj:*/libgobject-*
   fun:call_init.part.0
   fun:_dl_init
}

{
   dear glib - you suck at memory management
   Memcheck:Leak
   fun:calloc
   fun:g_malloc0
   obj:*/libgobject-*
   obj:*/libgobject-*
   obj:*/libgobject-*
   fun:call_init.part.0
   fun:_dl_init
}