diff --git a/.gitignore b/.gitignore index 70dab3d3b0..70e69022cc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,251 +1,251 @@ # Common \#* .\#* GPATH GRTAGS GTAGS TAGS Makefile Makefile.in .deps .dirstamp .libs *.pc *.pyc *.bz2 *.tar.gz *.tgz *.la *.lo *.o *~ *.gcda *.gcno # Autobuild aclocal.m4 autoconf autoheader autom4te.cache/ automake build.counter compile /confdefs.h config.guess config.log config.status config.sub configure /conftest* depcomp install-sh include/stamp-* libtool libtool.m4 ltdl.m4 libltdl ltmain.sh missing py-compile /m4/argz.m4 /m4/ltargz.m4 /m4/ltoptions.m4 /m4/ltsugar.m4 /m4/ltversion.m4 /m4/lt~obsolete.m4 test-driver ylwrap # Configure targets /cts/CTS.py /cts/CTSlab.py /cts/CTSvars.py /cts/LSBDummy /cts/OCFIPraTest.py /cts/benchmark/clubench /cts/cluster_test /cts/cts /cts/cts-cli /cts/cts-coverage /cts/cts-exec /cts/cts-fencing /cts/cts-log-watcher /cts/cts-regression /cts/cts-scheduler /cts/cts-support /cts/fence_dummy /cts/lxc_autogen.sh /cts/pacemaker-cts-dummyd /cts/pacemaker-cts-dummyd@.service /daemons/execd/pacemaker_remote /daemons/execd/pacemaker_remote.service /daemons/fenced/fence_legacy /daemons/pacemakerd/pacemaker /daemons/pacemakerd/pacemaker.combined.upstart /daemons/pacemakerd/pacemaker.service /daemons/pacemakerd/pacemaker.upstart /doc/Doxyfile /extra/logrotate/pacemaker /extra/resources/ClusterMon /extra/resources/HealthSMART /extra/resources/SysInfo /extra/resources/ifspeed /extra/resources/o2cb include/config.h include/config.h.in include/crm_config.h -publican.cfg /tools/cibsecret /tools/crm_error /tools/crm_failcount /tools/crm_master /tools/crm_mon.service /tools/crm_mon.upstart /tools/crm_report /tools/crm_rule /tools/crm_standby /tools/pcmk_simtimes /tools/report.collector /tools/report.common # Build targets *.7 *.7.xml *.7.html *.8 *.8.xml *.8.html /daemons/attrd/pacemaker-attrd /daemons/based/pacemaker-based /daemons/based/cibmon /daemons/controld/pacemaker-controld /daemons/execd/cts-exec-helper 
/daemons/execd/pacemaker-execd /daemons/execd/pacemaker-remoted /daemons/fenced/cts-fence-helper /daemons/fenced/pacemaker-fenced /daemons/fenced/pacemaker-fenced.xml /daemons/pacemakerd/pacemakerd /daemons/schedulerd/pacemaker-schedulerd /daemons/schedulerd/pacemaker-schedulerd.xml -/doc/*/tmp/** -/doc/*/publish -/doc/*.build -/doc/*/en-US/Ap-*.xml -/doc/*/en-US/Ch-*.xml /doc/.ABI-build /doc/HTML /doc/abi_dumps /doc/abi-check /doc/api/* /doc/compat_reports /doc/crm_fencing.html -/doc/publican-catalog* -/doc/shared/en-US/*.xml /doc/sphinx/*/_build /doc/sphinx/*/conf.py /doc/sphinx/shared/images/*.png /lib/common/md5.c /maint/testcc_helper.cc /maint/testcc_*_h /maint/mocked/based scratch /tools/attrd_updater /tools/cibadmin /tools/crmadmin /tools/crm_attribute /tools/crm_diff /tools/crm_mon /tools/crm_node /tools/crm_resource /tools/crm_shadow /tools/crm_simulate /tools/crm_ticket /tools/crm_verify /tools/iso8601 /tools/stonith_admin xml/crm.dtd xml/pacemaker*.rng xml/versions.rng xml/api/api-result*.rng lib/gnu/libgnu.a lib/gnu/stdalign.h *.coverity # Packager artifacts *.rpm /mock /pacemaker.spec /rpm/[A-Z]* # make dist/export working directory pacemaker-[a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9] # Test detritus /cts/.regression.failed.diff /cts/scheduler/*.ref /cts/scheduler/*.up /cts/scheduler/*.up.err /cts/scheduler/bug-rh-1097457.log /cts/scheduler/bug-rh-1097457.trs /cts/scheduler/shadow.* /cts/test-suite.log /lib/*/tests/*/*.log /lib/*/tests/*/*_test /lib/*/tests/*/*.trs /xml/test-*/*.up /xml/test-*/*.up.err /xml/assets/*.rng /xml/assets/diffview.js /xml/assets/xmlcatalog # Release maintenance detritus /maint/gnulib # Formerly built files (helps when jumping back and forth in checkout) /.ABI-build /Doxyfile /HTML /abi_dumps /abi-check /compat_reports /attrd /cib /coverage.sh /crmd /cts/HBDummy +/doc/*.build +/doc/*/en-US/Ap-*.xml +/doc/*/en-US/Ch-*.xml +/doc/*/publican.cfg +/doc/*/publish +/doc/*/tmp/** /doc/Clusters_from_Scratch.txt 
/doc/Pacemaker_Explained.txt /doc/acls.html +/doc/publican-catalog* +/doc/shared/en-US/*.xml /doc/shared/en-US/images/pcmk-*.png /doc/shared/en-US/images/Policy-Engine-*.png /fencing /lib/common/tests/flags/pcmk__clear_flags_as /lib/common/tests/flags/pcmk__set_flags_as /lib/common/tests/flags/pcmk_all_flags_set /lib/common/tests/flags/pcmk_any_flags_set /lib/common/tests/operations/parse_op_key /lib/common/tests/strings/pcmk__btoa /lib/common/tests/strings/pcmk__parse_ll_range /lib/common/tests/strings/pcmk__scan_double /lib/common/tests/strings/pcmk__str_any_of /lib/common/tests/strings/pcmk__strcmp /lib/common/tests/strings/pcmk__char_in_any_str /lib/common/tests/utils/pcmk_str_is_infinity /lib/common/tests/utils/pcmk_str_is_minus_infinity /lib/pengine/tests/rules/pe_cron_range_satisfied /lrmd /mcp /pacemaker-*.spec /pengine #Other coverity-* logs *.patch *.diff *.sed *.orig *.rej *.swp diff --git a/INSTALL.md b/INSTALL.md index d61b9f0e84..dc4fe00805 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,77 +1,76 @@ # How to Install Pacemaker ## Build Dependencies | Version | Fedora-based | Suse-based | Debian-based | |:---------------:|:------------------:|:------------------:|:--------------:| | 1.11 or later | automake | automake | automake | | 2.64 or later | autoconf | autoconf | autoconf | | | libtool | libtool | libtool | | | libtool-ltdl-devel | | libltdl-dev | | | libuuid-devel | libuuid-devel | uuid-dev | | | pkgconfig | pkgconfig | pkg-config | | 2.16.0 or later | glib2-devel | glib2-devel | libglib2.0-dev | | | libxml2-devel | libxml2-devel | libxml2-dev | | | libxslt-devel | libxslt-devel | libxslt-dev | | | bzip2-devel | libbz2-devel | libbz2-dev | | | libqb-devel | libqb-devel | libqb-dev | Also: GNU make, and Python 2.7 or Python 3.2 or later ### Cluster Stack Dependencies *Only corosync is currently supported* | Version | Fedora-based | Suse-based | Debian-based | |:---------------:|:------------------:|:------------------:|:--------------:| | 2.0.0 or 
later | corosynclib | libcorosync | corosync | | 2.0.0 or later | corosynclib-devel | libcorosync-devel | | | | | | libcfg-dev | | | | | libcpg-dev | | | | | libcmap-dev | | | | | libquorum-dev | ### Optional Build Dependencies | Feature Enabled | Version | Fedora-based | Suse-based | Debian-based | |:-----------------------------------------------:|:--------------:|:-----------------------:|:-----------------------:|:-----------------------:| | Pacemaker Remote and encrypted remote CIB admin | 2.1.7 or later | gnutls-devel | libgnutls-devel | libgnutls-dev | | encrypted remote CIB admin | | pam-devel | pam-devel | libpam0g-dev | | interactive crm_mon | | ncurses-devel | ncurses-devel | ncurses-dev | | systemd support | | systemd-devel | systemd-devel | libsystemd-dev | | systemd/upstart resource support | | dbus-devel | dbus-devel | libdbus-1-dev | | Linux-HA style fencing agents | | cluster-glue-libs-devel | libglue-devel | cluster-glue-dev | | documentation | | asciidoc or asciidoctor | asciidoc or asciidoctor | asciidoc or asciidoctor | | documentation | | help2man | help2man | help2man | -| documentation | | publican | | publican | | documentation | | inkscape | inkscape | inkscape | | documentation | | docbook-style-xsl | docbook-xsl-stylesheets | docbook-xsl | | documentation | | python-sphinx or python3-sphinx | python-sphinx or python3-sphinx | python-sphinx or python3-sphinx | | documentation (PDF) | | texlive, texlive-titlesec, texlive-framed, texlive-threeparttable texlive-wrapfig texlive-multirow | texlive, texlive-latex | texlive, texlive-latex-extra | ## Optional testing dependencies * valgrind (if running CTS valgrind tests) * systemd-python (if using CTS on cluster nodes running systemd) * rsync (if running CTS container tests) * libvirt-daemon-driver-lxc (if running CTS container tests) * libvirt-daemon-lxc (if running CTS container tests) * libvirt-login-shell (if running CTS container tests) * nmap (if not specifying an IP address base) * 
oprofile (if running CTS profiling tests) * dlm (to log DLM debugging info after CTS tests) ## Simple install $ make && sudo make install If GNU make is not your default make, use "gmake" instead. ## Detailed install First, browse the build options that are available: $ ./autogen.sh $ ./configure --help Re-run ./configure with any options you want, then proceed with the simple method. diff --git a/Makefile.am b/Makefile.am index 0765057761..b6dcb849fb 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,127 +1,114 @@ # # Copyright 2003-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # # m4/glibtests.m4 is copied from https://gitlab.gnome.org/GNOME/glib/blob/master/m4macros/glibtests.m4. EXTRA_DIST = CONTRIBUTING.md \ GNUmakefile \ INSTALL.md \ README.markdown \ autogen.sh \ m4/glibtests.m4 \ m4/gnulib-cache.m4 \ m4/gnulib-tool.m4 \ rpm/rpmlintrc \ rpm/pacemaker.spec.in DISTCLEANFILES = config.status MAINTAINERCLEANFILES = Makefile.in \ aclocal.m4 \ config.guess \ config.sub \ configure \ depcomp \ install-sh \ ltmain.sh \ missing \ py-compile \ test-driver -# Disable building Publican documentation when doing "make distcheck", because -# some of our book sources are in the source directory, while others are -# dynamically generated in the build directory, and publican can't handle that. -# -# In a non-VPATH build, doc isn't entered with a plain "make" because the -# GNUmakefile sets "core" as the default target. However in a VPATH build, -# there is no GNUmakefile, so "all" becomes the default target. -# -# Also, don't try to install files outside the build directory. -# -# @TODO To support VPATH builds for Publican, we could use the same "copy all -# static inputs into the build tree" trick that xml/Makefile.am uses for -# static schema files. 
-AM_DISTCHECK_CONFIGURE_FLAGS = --with-brand="" \ - --prefix="$$dc_install_base/usr" \ +# Don't try to install files outside build directory for "make distcheck". +AM_DISTCHECK_CONFIGURE_FLAGS = --prefix="$$dc_install_base/usr" \ --sysconfdir="$$dc_install_base/etc" \ --with-initdir="$$dc_install_base/etc/init.d" \ --with-ocfdir="$$dc_install_base/usr/lib/ocf" \ --with-systemdsystemunitdir="$$dc_install_base$(systemdsystemunitdir)" # Only these will get installed with a plain "make install" CORE_INSTALL = replace include lib daemons tools xml # Only these will get built with a plain "make" or "make clean" CORE = $(CORE_INSTALL) cts SUBDIRS = $(CORE) devel doc extra maint tests AM_CPPFLAGS = -I$(top_srcdir)/include doc_DATA = README.markdown COPYING licensedir = $(docdir)/licenses/ dist_license_DATA = $(wildcard licenses/*) # Scratch file for ad-hoc testing EXTRA_PROGRAMS = scratch nodist_scratch_SOURCES = scratch.c scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la core: @echo "Building only core components and tests: $(CORE)" @for subdir in $(CORE); do \ echo "Building $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir all || exit 1; \ done core-install: @echo "Installing only core components: $(CORE_INSTALL)" @for subdir in $(CORE_INSTALL); do \ echo "Installing $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir install || exit 1; \ done core-clean: @echo "Cleaning only core components and tests: $(CORE)" @for subdir in $(CORE); do \ echo "Cleaning $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir clean || exit 1; \ done install-exec-local: $(INSTALL) -d -m 750 $(DESTDIR)/$(PACEMAKER_CONFIG_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CONFIG_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CORE_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_BLACKBOX_DIR) $(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_LOG_DIR) $(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_BUNDLE_DIR) -chgrp $(CRM_DAEMON_GROUP) $(DESTDIR)/$(PACEMAKER_CONFIG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) 
$(DESTDIR)/$(CRM_CONFIG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CORE_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_BLACKBOX_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_LOG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_BUNDLE_DIR) # Use chown because the user/group may not exist clean-generic: -rm -f *.tar.bz2 *.sed PACKAGE ?= pacemaker # In a normal build, this file is included by GNUmakefile, which serves as the # "real" makefile. But in a VPATH build, GNUmakefile won't exist in the build # tree, and this file will be the "real" makefile. EXTRA_CLEAN_TARGETS handles # both cases: GNUmakefile defines it before including this file, so the # clean-local target can clean up files created by GNUmakefile targets. # If this file is used alone, the variable will be undefined. clean-local: $(EXTRA_CLEAN_TARGETS) -rm -f scratch $(builddir)/$(PACKAGE)-*.tar.gz distclean-local: -rm -rf libltdl autom4te.cache diff --git a/configure.ac b/configure.ac index 69398fba4d..62434a368d 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2127 +1,2087 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2020 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. 
dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) AC_CONFIG_MACRO_DIR([m4]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects]) dnl Require pkg-config (with a minimum version) PKG_PROG_PKG_CONFIG(0.18) AS_IF([test "x${PKG_CONFIG}" != x], [], [AC_MSG_ERROR([pkgconfig must be installed to build ${PACKAGE}])]) dnl PKG_NOARCH_INSTALLDIR is not available prior to pkg-config 0.27 and dnl pkgconf 0.8.10 (uncomment next line to mimic that scenario) dnl 
m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) m4_ifndef([PKG_NOARCH_INSTALLDIR], [ AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) ]) ]) PKG_NOARCH_INSTALLDIR dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd GLIB_TESTS dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AC_PROG_CC_STDC AC_PROG_CXX dnl C++ is not needed for build, just maintainer utilities dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that dnl the C compiler is working. ... The core part of the gnulib checks are done dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL dnl as a dependency. gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT # --enable-new-dtags: Use RUNPATH instead of RPATH. # It is necessary to have this done before libtool does linker detection. 
# See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])]) AC_SUBST([AM_LDFLAGS]) saved_LDFLAGS="$LDFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)], [RC=1; AC_MSG_RESULT(no)]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } dnl =============================================== dnl Configure Options dnl =============================================== dnl Actual library checks come later, but pkg-config can be used here to grab dnl external values to use as defaults for configure options dnl --enable-* options AC_ARG_ENABLE([ansi], [AS_HELP_STRING([--enable-ansi], [force GCC to compile to ANSI standard for older compilers. 
@<:@no@:>@])], ) AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@yes@:>@])], ) AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])], ) AC_ARG_ENABLE([no-stack], [AS_HELP_STRING([--enable-no-stack], [build only the scheduler and its requirements @<:@no@:>@])], ) AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart @<:@try@:>@])], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])], [], [enable_systemd=try], ) AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) # By default, we add symlinks at the pre-2.0.0 daemon name locations, so that: # (1) tools that directly invoke those names for metadata etc. will still work # (2) this installation can be used in a bundle container image used with # cluster hosts running Pacemaker 1.1.17+ # If you know your target systems will not have any need for it, you can # disable this option. Once the above use cases are no longer in wide use, we # can disable this option by default, and once we no longer want to support # them at all, we can drop the option altogether. 
AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names @<:@yes@:>@])], [ LEGACY_LINKS="${enableval}" ], [ LEGACY_LINKS=yes ], ) AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes") dnl --with-* options AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACKAGE_VERSION="$withval" ])] ) VERSION_ARG(VERSION_NUMBER) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer])], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios remote monitoring])], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) AC_ARG_WITH([acl], [AS_HELP_STRING([--with-acl], [support CIB ACL])], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets])], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced 
option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) SUPPORT_PROFILING=0 AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations for effective profiling])], [ SUPPORT_PROFILING=$withval ] ) AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations for effective profiling])], [ SUPPORT_COVERAGE=$withval ] ) -PUBLICAN_BRAND="common" -AC_ARG_WITH([brand], - [AS_HELP_STRING([--with-brand=brand], - [brand to use for generated documentation (set empty for no docs) @<:@common@:>@])], - [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ] -) -AC_SUBST(PUBLICAN_BRAND) - BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], [address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@])], [ BUG_URL="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])], [ SANITIZERS="$withval" ], [ SANITIZERS="" ]) dnl The not-yet-released autoconf 2.70 will have a --runstatedir option. dnl Until that's available, emulate it with our own --with-runstatedir. 
pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) dnl This defaults to /usr/lib rather than libdir because it's determined by the dnl OCF project and not pacemaker. Even if a user wants to install pacemaker to dnl /usr/local or such, the OCF agents will be expected in their usual dnl location. However, we do give the user the option to override it. OCF_ROOT_DIR="/usr/lib/ocf" AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], [OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@/usr/lib/ocf@:>@])], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) dnl Get default from fence-agents if available PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"], [PCMK__FENCE_BINDIR="$sbindir"]) AC_ARG_WITH([fence-bindir], [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([ directory for executable fence agents @<:@value from fence-agents package if available otherwise SBINDIR@:>@]))], [ PCMK__FENCE_BINDIR="$withval" ] ) AC_SUBST(PCMK__FENCE_BINDIR) CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) dnl Deprecated options AC_ARG_WITH([pkg-name], [AS_HELP_STRING([--with-pkg-name=name], [deprecated and unused (will be removed in a future release)])], ) 
AC_ARG_WITH([pkgname], [AS_HELP_STRING([--with-pkgname=name], [deprecated and unused (will be removed in a future release)])], ) dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", [Current pacemaker version]) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) AC_PROG_LN_S AC_PROG_MKDIR_P if cc_supports_flag -Werror; then WERROR="-Werror" else WERROR="" fi # Normalize enable_fatal_warnings (defaulting to yes, when compiler supports it) if test "x${enable_fatal_warnings}" != "xno" ; then if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then enable_fatal_warnings=yes else AC_MSG_NOTICE(Compiler does not support fatal warnings) enable_fatal_warnings=no fi fi INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' 
dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables if [ test "x${runstatedir}" = "x" ]; then if [ test "x${pcmk_runstatedir}" = "x" ]; then runstatedir="${localstatedir}/run" else runstatedir="${pcmk_runstatedir}" fi fi eval runstatedir="$(eval echo ${runstatedir})" AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) if test x"${CRM_LOG_DIR}" = x""; then CRM_LOG_DIR="${localstatedir}/log/pacemaker" fi AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) if test x"${CRM_BUNDLE_DIR}" = x""; then CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles" fi AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) eval PCMK__FENCE_BINDIR="`eval echo ${PCMK__FENCE_BINDIR}`" AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR", [Location for executable fence agents]) if test 
x"${PCMK_GNUTLS_PRIORITIES}" = x""; then AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities]) fi AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) if test x"${BUG_URL}" = x""; then BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker" fi AC_SUBST(BUG_URL) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include ]]) ], [[#define _GNU_SOURCE #include ]]) ], [], [[#include ]]) ]) if test -z "${us_auth}"; then # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([US_AUTH_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([US_AUTH_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ AC_MSG_ERROR([No way to authenticate a Unix socket peer]) ], [[#include ]]) ]) fi dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac # C99 doesn't guarantee uint64_t type and related format specifiers, but # prerequisites, corosync + libqb, use that widely, so the target platforms # are already pre-constrained to those "64bit-clean" (doesn't imply native # bit width) and hence we deliberately refrain from artificial surrogates # (sans manipulation through cached values). AC_CACHE_VAL( [pcmk_cv_decl_inttypes], [ AC_CHECK_DECLS( [PRIu64, PRIu32, PRIx32, SCNu64], [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"], [ # test shall only react on "no" cached result & error out respectively if test "x$ac_cv_have_decl_PRIu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)]) elif test "x$ac_cv_have_decl_PRIu32" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)]) elif test "x$ac_cv_have_decl_PRIx32" = xno; then AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)]) elif test "x$ac_cv_have_decl_SCNu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)]) fi ], [[#include ]] ) ] ) ( set $pcmk_cv_decl_inttypes AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T]) AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T]) AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T]) AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for 
U64TS]) ) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path if test x"${PYTHON}" != x""; then AC_PATH_PROG([PYTHON], [$PYTHON]) fi case "x$PYTHON" in x*python3*|x*platform-python*) dnl When used with Python 3, Pacemaker requires a minimum of 3.2 AM_PATH_PYTHON([3.2]) ;; *) dnl Otherwise, Pacemaker requires a minimum of 2.7 AM_PATH_PYTHON([2.7]) ;; esac AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) -AC_PATH_PROG([PUBLICAN], [publican]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl BASH is already an environment variable, so use something else AC_PATH_PROG([BASH_PATH], [bash]) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi dnl Bash is needed for building man pages and running regression tests if test x"${BASH_PATH}" = x""; then AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != 
x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi -publican_intree_brand=no -if test x"${PUBLICAN_BRAND}" != x"" \ - && test x"${PUBLICAN}" != x"" \ - && test x"${INKSCAPE}" != x""; then - - dnl special handling for clusterlabs brand (possibly in-tree version used) - test "${PUBLICAN_BRAND}" != "clusterlabs" \ - || test -d /usr/share/publican/Common_Content/clusterlabs - if test $? -ne 0; then - dnl Unknown option: brand_dir vs. 
Option brand_dir requires an argument - if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then - AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common]) - PUBLICAN_BRAND=common - else - publican_intree_brand=yes - fi - fi - AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand]) - PCMK_FEATURES="$PCMK_FEATURES publican-docs" -fi -AM_CONDITIONAL([BUILD_DOCBOOK], - [test x"${PUBLICAN_BRAND}" != x"" \ - && test x"${PUBLICAN}" != x"" \ - && test x"${INKSCAPE}" != x""]) -AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], - [test x"${publican_intree_brand}" = x"yes"]) AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH; do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then $GETOPT_PATH -T >/dev/null 2>/dev/null if test $? -eq 4; then break fi fi GETOPT_PATH="" done IFS=$IFS_orig if test -n "$GETOPT_PATH"; then AC_MSG_RESULT([$GETOPT_PATH]) else AC_MSG_RESULT([no]) AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt) fi AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) 
Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_FUNCS([sched_setscheduler]) if test "$ac_cv_func_sched_setscheduler" != yes; then PC_LIBS_RT="" else PC_LIBS_RT="-lrt" fi AC_SUBST(PC_LIBS_RT) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi # Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc. PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # # Where is dlopen? 
# if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) if test "x${enable_fatal_warnings}" = xyes ; then cc_temp_flags "$CFLAGS $WERROR" fi AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/swab.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/reboot.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/signalfd.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) if test "x${enable_fatal_warnings}" = xyes ; then cc_restore_flags fi dnl These headers need prerequisites before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} 
${LIBXML2_LIBS}"]) AC_CHECK_HEADERS(libxml/xpath.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(libxml development headers not found) fi AC_CHECK_LIB(xslt, xsltApplyStylesheet, [], AC_MSG_ERROR(Unsupported libxslt library version)) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(libxslt development headers not found) fi AC_CACHE_CHECK(whether __progname and __progname_full are available, pf_cv_var_progname, AC_TRY_LINK([extern char *__progname, *__progname_full;], [__progname = "foo"; __progname_full = "foo bar";], pf_cv_var_progname="yes", pf_cv_var_progname="no")) if test "$pf_cv_var_progname" = "yes"; then AC_DEFINE(HAVE___PROGNAME,1,[ ]) fi dnl ======================================================================== dnl Generic declarations dnl ======================================================================== AC_CHECK_DECLS([CLOCK_MONOTONIC], [], [], [[ #include ]]) dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include ]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) AC_CACHE_CHECK(whether sscanf supports %m, pf_cv_var_sscanf, AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include const char *s = "some-command-line-arg"; int main(int argc, char **argv) { char *name = NULL; int n = sscanf(s, "%ms", &name); return n == 1 ? 
0 : 1; } ]])], pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) if test "$pf_cv_var_sscanf" = "yes"; then AC_DEFINE(SSCANF_HAS_M, 1, [ ]) fi dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_TRY_COMPILE([#include ],[sighandler_t *f;], has_sighandler_t=yes,has_sighandler_t=no) AC_MSG_RESULT($has_sighandler_t) if test "$has_sighandler_t" = "yes" ; then AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurses takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. 
CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" cc_temp_flags "-Wcast-qual $WERROR" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [pcmk_cv_compatible_printw=yes], [pcmk_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS cc_restore_flags AC_MSG_RESULT([$pcmk_cv_compatible_printw]) if test 
"$pcmk_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== if test x${enable_no_stack} = xyes; then SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) 
CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 2.02+ (2020-10) AC_CHECK_FUNCS(qb_ipcc_auth_get, AC_DEFINE(HAVE_IPCC_AUTH_GET, 1, [Have qb_ipcc_auth_get function])) PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" dnl libqb 0.17.0+ (2014-02) AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) dnl libqb 2.0.0+ (2020-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS(stonith/stonith.h) if test "$ac_cv_header_stonith_stonith_h" = "yes"; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) if test x"${CRM_DAEMON_USER}" = x""; then CRM_DAEMON_USER="hacluster" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) if test x"${CRM_DAEMON_GROUP}" = x""; then CRM_DAEMON_GROUP="haclient" fi 
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries]) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: 
$BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"], [HAVE_dbus=0]) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) AC_CHECK_TYPES([DBusBasicValue],,,[[#include ]]) if test $HAVE_dbus = 0; then PC_NAME_DBUS="" else PC_NAME_DBUS="dbus-1" fi AC_SUBST(PC_NAME_DBUS) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without DBus]) else enable_systemd=no fi fi if test $(echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without clock_gettime(CLOCK_MONOTONIC, ...)]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then AC_MSG_CHECKING([for systemd version query result via dbus-send]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi 
AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) HAVE_systemd=0 if test "x${enable_systemd}" = xyes; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) if test "x${systemdsystemunitdir}" = x""; then AC_MSG_FAILURE([cannot enable systemd when systemdsystemunitdir unresolved]) fi fi AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then AC_MSG_FAILURE([cannot enable Upstart without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then AC_MSG_CHECKING([for Upstart version query result via dbus-send]) ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) HAVE_upstart=0 if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) AC_SUBST(SUPPORT_UPSTART) case 
$SUPPORT_NAGIOS in 1|yes|true) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then AC_MSG_FAILURE([cannot enable nagios without clock_gettime(CLOCK_MONOTONIC, ...)]) fi SUPPORT_NAGIOS=1 ;; try) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then SUPPORT_NAGIOS=0 else SUPPORT_NAGIOS=1 fi ;; *) SUPPORT_NAGIOS=0 ;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_CS=no ;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS) SUPPORT_CS=1 PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" 
COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-native" fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AC_SUBST(SUPPORT_COROSYNC) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported, building only the scheduler) PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_ACL=no ;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) AC_CHECK_FUNCS(qb_ipcs_connection_auth_set, SUPPORT_ACL=1, SUPPORT_ACL=0) if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. 
You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1 ;; *) SUPPORT_CIBSECRETS=0 ;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) if test $SUPPORT_CIBSECRETS = 1; then PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) fi dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29) AC_CHECK_LIB(gnutls, gnutls_priority_set_direct) if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_FUNCS([gnutls_sec_param_to_pk_bits]) dnl since 2.12.0 (2011-03-24) if test "$ac_cv_header_gnutls_gnutls_h" != "yes"; then PC_NAME_GNUTLS="" else PC_NAME_GNUTLS="gnutls" fi AC_SUBST(PC_NAME_GNUTLS) fi dnl ======================================================================== dnl PAM dnl ======================================================================== AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 
SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIPMI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG then PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) OPENIPMI_SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") # --- ASAN/UBSAN/TSAN (see man gcc) --- # when using SANitizers, we need to pass the -fsanitize.. # to both CFLAGS and LDFLAGS. The CFLAGS/LDFLAGS must be # specified as first in the list or there will be runtime # issues (for example user has to LD_PRELOAD asan for it to work # properly). 
if test -n "${SANITIZERS}"; then SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g') for SANITIZER in $SANITIZERS; do case $SANITIZER in asan|ASAN) SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" AC_CHECK_LIB([asan],[main],,AC_MSG_ERROR([Unable to find libasan])) ;; ubsan|UBSAN) SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" AC_CHECK_LIB([ubsan],[main],,AC_MSG_ERROR([Unable to find libubsan])) ;; tsan|TSAN) SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" AC_CHECK_LIB([tsan],[main],,AC_MSG_ERROR([Unable to find libtsan])) ;; esac done fi dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. 
if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) CC_EXTRAS="" if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" else CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags if cc_supports_flag "-Wformat-nonliteral"; then gcc_format_nonliteral=yes else gcc_format_nonliteral=no fi # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). 
EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char" if test "x$gcc_diagnostic_push_pull" = "xyes"; then AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" else if test "x$gcc_format_nonliteral" = "xyes"; then EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" fi fi # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $CC_EXTRAS $j then CC_EXTRAS="$CC_EXTRAS $j" fi done if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The 
user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposely targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi if test "x${HARDENING}" = "xno"; then AC_MSG_NOTICE([Hardening: explicitly disabled]) elif test "x${HARDENING}" = "xyes" \ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then dnl We'll figure out on our own... CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1]) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1]) fi # daemons incl. 
libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1]) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS $WERROR" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant fi AC_MSG_RESULT(Suppress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py]) AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy]) AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py]) AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test]) AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts]) AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli]) AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage]) AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec]) AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing]) AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher]) AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression]) AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler]) AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support]) AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh]) AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench]) AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) 
AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) AC_CONFIG_FILES([extra/resources/SysInfo], [chmod +x extra/resources/SysInfo]) AC_CONFIG_FILES([extra/resources/ifspeed], [chmod +x extra/resources/ifspeed]) AC_CONFIG_FILES([extra/resources/o2cb], [chmod +x extra/resources/o2cb]) AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount]) AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master]) AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report]) AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby]) AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret]) AC_CONFIG_FILES([tools/pcmk_simtimes], [chmod +x tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ - doc/Clusters_from_Scratch/publican.cfg \ - doc/Pacemaker_Administration/publican.cfg \ - doc/Pacemaker_Development/publican.cfg \ - doc/Pacemaker_Explained/publican.cfg \ - doc/Pacemaker_Remote/publican.cfg \ doc/sphinx/Makefile \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ 
extra/logrotate/Makefile \ extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/operations/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/rules/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tests/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = 
${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_RESULT([ Unix socket auth method = ${us_auth}]) diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt deleted file mode 100644 index 381bd68a5b..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt +++ /dev/null @@ -1,363 +0,0 @@ -:compat-mode: legacy -[appendix] -== Configuration Recap == - -=== Final Cluster Configuration === - ----- -[root@pcmk-1 ~]# pcs resource - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 pcmk-2 ] - Clone Set: dlm-clone [dlm] - Started: [ pcmk-1 pcmk-2 ] - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - Clone Set: WebFS-clone [WebFS] - Started: [ pcmk-1 pcmk-2 ] - WebSite (ocf::heartbeat:apache): Started pcmk-1 ----- - ----- -[root@pcmk-1 ~]# pcs resource op defaults -timeout: 240s ----- - ----- -[root@pcmk-1 ~]# pcs stonith - impi-fencing (stonith:fence_ipmilan): Started pcmk-1 ----- - ----- -[root@pcmk-1 ~]# pcs constraint -Location Constraints: -Ordering Constraints: - start ClusterIP then start WebSite (kind:Mandatory) - promote WebDataClone then start WebFS-clone (kind:Mandatory) - start WebFS-clone then start WebSite (kind:Mandatory) - start dlm-clone then start WebFS-clone (kind:Mandatory) -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) - WebFS-clone with WebDataClone (score:INFINITY) (with-rsc-role:Master) - WebSite with WebFS-clone (score:INFINITY) - WebFS-clone with dlm-clone (score:INFINITY) -Ticket Constraints: ----- - ----- 
-[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-1 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Tue Sep 11 10:41:53 2018 -Last change: Tue Sep 11 10:40:16 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -11 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 pcmk-2 ] - Clone Set: dlm-clone [dlm] - Started: [ pcmk-1 pcmk-2 ] - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - Clone Set: WebFS-clone [WebFS] - Started: [ pcmk-1 pcmk-2 ] - WebSite (ocf::heartbeat:apache): Started pcmk-1 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - ----- -[root@pcmk-1 ~]# pcs cluster cib --config ----- -[source,XML] ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ----- - -=== Node List === - ----- -[root@pcmk-1 ~]# pcs status nodes -Pacemaker Nodes: - Online: pcmk-1 pcmk-2 - Standby: - Maintenance: - Offline: -Pacemaker Remote Nodes: - Online: - Standby: - Maintenance: - Offline: ----- - -=== Cluster Options === - ----- -[root@pcmk-1 ~]# pcs property -Cluster Properties: - cluster-infrastructure: corosync - cluster-name: mycluster - dc-version: 1.1.18-11.el7_5.3-2b07d5c5a9 - have-watchdog: false - last-lrm-refresh: 1536679009 - stonith-enabled: true ----- - -The output shows state information automatically obtained about the cluster, including: - -* *cluster-infrastructure* - the cluster communications layer in use -* *cluster-name* - the cluster name chosen by the administrator when the cluster was created -* *dc-version* - the version (including upstream source-code hash) of Pacemaker - used 
on the Designated Controller, which is the node elected to determine what - actions are needed when events occur - -The output also shows options set by the administrator that control the way the cluster operates, including: - -* *stonith-enabled=true* - whether the cluster is allowed to use STONITH resources - -=== Resources === - -==== Default Options ==== - ----- -[root@pcmk-1 ~]# pcs resource defaults -resource-stickiness: 100 ----- - -This shows cluster option defaults that apply to every resource that does not -explicitly set the option itself. Above: - -* *resource-stickiness* - Specify the aversion to moving healthy resources to other machines - -==== Fencing ==== - ----- -[root@pcmk-1 ~]# pcs stonith show - ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1 -[root@pcmk-1 ~]# pcs stonith show ipmi-fencing - Resource: ipmi-fencing (class=stonith type=fence_ipmilan) - Attributes: ipaddr="10.0.0.1" login="testuser" passwd="acd123" pcmk_host_list="pcmk-1 pcmk-2" - Operations: monitor interval=60s (fence-monitor-interval-60s) ----- - -==== Service Address ==== - -Users of the services provided by the cluster require an unchanging -address with which to access it. - ----- -[root@pcmk-1 ~]# pcs resource show ClusterIP -Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2) - Attributes: cidr_netmask=24 ip=192.168.122.120 clusterip_hash=sourceip - Meta Attrs: resource-stickiness=0 - Operations: monitor interval=30s (ClusterIP-monitor-interval-30s) - start interval=0s timeout=20s (ClusterIP-start-interval-0s) - stop interval=0s timeout=20s (ClusterIP-stop-interval-0s) ----- - -==== DRBD - Shared Storage ==== - -Here, we define the DRBD service and specify which DRBD resource (from -/etc/drbd.d/*.res) it should manage. We make it a master clone resource and, in -order to have an active/active setup, allow both instances to be promoted to master -at the same time. 
We also set the notify option so that the -cluster will tell DRBD agent when its peer changes state. - ----- -[root@pcmk-1 ~]# pcs resource show WebDataClone - Master: WebDataClone - Meta Attrs: master-node-max=1 clone-max=2 notify=true master-max=2 clone-node-max=1 - Resource: WebData (class=ocf provider=linbit type=drbd) - Attributes: drbd_resource=wwwdata - Operations: demote interval=0s timeout=90 (WebData-demote-interval-0s) - monitor interval=60s (WebData-monitor-interval-60s) - notify interval=0s timeout=90 (WebData-notify-interval-0s) - promote interval=0s timeout=90 (WebData-promote-interval-0s) - reload interval=0s timeout=30 (WebData-reload-interval-0s) - start interval=0s timeout=240 (WebData-start-interval-0s) - stop interval=0s timeout=100 (WebData-stop-interval-0s) -[root@pcmk-1 ~]# pcs constraint ref WebDataClone -Resource: WebDataClone - colocation-WebFS-WebDataClone-INFINITY - order-WebDataClone-WebFS-mandatory ----- - -==== Cluster Filesystem ==== - -The cluster filesystem ensures that files are read and written correctly. -We need to specify the block device (provided by DRBD), where we want it -mounted and that we are using GFS2. Again, it is a clone because it is -intended to be active on both nodes. The additional constraints ensure -that it can only be started on nodes with active DLM and DRBD instances. 
- ----- -[root@pcmk-1 ~]# pcs resource show WebFS-clone - Clone: WebFS-clone - Resource: WebFS (class=ocf provider=heartbeat type=Filesystem) - Attributes: device=/dev/drbd1 directory=/var/www/html fstype=gfs2 - Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20) - notify interval=0s timeout=60 (WebFS-notify-interval-0s) - start interval=0s timeout=60 (WebFS-start-interval-0s) - stop interval=0s timeout=60 (WebFS-stop-interval-0s) -[root@pcmk-1 ~]# pcs constraint ref WebFS-clone -Resource: WebFS-clone - colocation-WebFS-WebDataClone-INFINITY - colocation-WebSite-WebFS-INFINITY - colocation-WebFS-dlm-clone-INFINITY - order-WebDataClone-WebFS-mandatory - order-WebFS-WebSite-mandatory - order-dlm-clone-WebFS-mandatory ----- - -==== Apache ==== - -Lastly, we have the actual service, Apache. We need only tell the cluster -where to find its main configuration file and restrict it to running on -a node that has the required filesystem mounted and the IP address active. - ----- -[root@pcmk-1 ~]# pcs resource show WebSite -Resource: WebSite (class=ocf provider=heartbeat type=apache) - Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status - Operations: monitor interval=1min (WebSite-monitor-interval-1min) - start interval=0s timeout=40s (WebSite-start-interval-0s) - stop interval=0s timeout=60s (WebSite-stop-interval-0s) -[root@pcmk-1 ~]# pcs constraint ref WebSite -Resource: WebSite - colocation-WebSite-ClusterIP-INFINITY - colocation-WebSite-WebFS-INFINITY - order-ClusterIP-WebSite-mandatory - order-WebFS-WebSite-mandatory ----- diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt b/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt deleted file mode 100644 index ea286275da..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt +++ /dev/null @@ -1,37 +0,0 @@ -:compat-mode: legacy -[appendix] -[[ap-corosync-conf]] -== Sample Corosync Configuration == - -.Sample +corosync.conf+ for 
two-node cluster created by `pcs`. -..... -totem { - version: 2 - cluster_name: mycluster - secauth: off - transport: udpu -} - -nodelist { - node { - ring0_addr: pcmk-1 - nodeid: 1 - } - - node { - ring0_addr: pcmk-2 - nodeid: 2 - } -} - -quorum { - provider: corosync_votequorum - two_node: 1 -} - -logging { - to_logfile: yes - logfile: /var/log/cluster/corosync.log - to_syslog: yes -} -..... diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Reading.txt b/doc/Clusters_from_Scratch/en-US/Ap-Reading.txt deleted file mode 100644 index 750fd74b2a..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ap-Reading.txt +++ /dev/null @@ -1,11 +0,0 @@ -:compat-mode: legacy -[appendix] -== Further Reading == - -- Project Website https://www.clusterlabs.org/ - -- SuSE has a comprehensive guide to cluster commands (though using the `crmsh` command-line - shell rather than `pcs`) at: - https://www.suse.com/documentation/sle_ha/book_sleha/data/book_sleha.html - -- Corosync http://www.corosync.org/ diff --git a/doc/Clusters_from_Scratch/en-US/Author_Group.xml b/doc/Clusters_from_Scratch/en-US/Author_Group.xml deleted file mode 100644 index ff907feee5..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Author_Group.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - Written by the Pacemaker project contributors - - - diff --git a/doc/Clusters_from_Scratch/en-US/Book_Info.xml b/doc/Clusters_from_Scratch/en-US/Book_Info.xml deleted file mode 100644 index 2eec85c420..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Book_Info.xml +++ /dev/null @@ -1,71 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - Clusters from Scratch - Step-by-Step Instructions for Building Your First High-Availability Cluster - Pacemaker - 2.0 - - 11 - 1 - - - This document provides a step-by-step guide to building a simple high-availability cluster using Pacemaker. 
- - - The example cluster will use: - - - - &DISTRO; &DISTRO_VERSION; as the host operating system - - - - - Corosync to provide messaging and membership services, - - - - - Pacemaker 1.1.18 - While this guide is part of the document set for - Pacemaker 2.0, it demonstrates the version available in - the standard &DISTRO; repositories. - - - - - DRBD as a cost-effective alternative to shared storage, - - - - - GFS2 as the cluster filesystem (in active/active mode) - - - - - - Given the graphical nature of the install process, a number of - screenshots are included. However the guide is primarily composed of - commands, the reasons for executing them and their expected outputs. - - - - - - - - - - - - diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt deleted file mode 100644 index 57e376e484..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt +++ /dev/null @@ -1,272 +0,0 @@ -:compat-mode: legacy -= Convert Storage to Active/Active = - -The primary requirement for an Active/Active cluster is that the data -required for your services is available, simultaneously, on both -machines. Pacemaker makes no requirement on how this is achieved; you -could use a SAN if you had one available, but since DRBD supports -multiple Primaries, we can continue to use it here. - -== Install Cluster Filesystem Software == - -The only hitch is that we need to use a cluster-aware filesystem. The -one we used earlier with DRBD, xfs, is not one of those. Both OCFS2 -and GFS2 are supported; here, we will use GFS2. 
- -On both nodes, install the GFS2 command-line utilities and the -Distributed Lock Manager (DLM) required by cluster filesystems: ----- -# yum install -y gfs2-utils dlm ----- - -== Configure the Cluster for the DLM == - -The DLM control daemon needs to run on both nodes, so we'll start by creating a -resource for it (using the *ocf:pacemaker:controld* resource script), and clone -it: ----- -[root@pcmk-1 ~]# pcs cluster cib dlm_cfg -[root@pcmk-1 ~]# pcs -f dlm_cfg resource create dlm \ - ocf:pacemaker:controld op monitor interval=60s -[root@pcmk-1 ~]# pcs -f dlm_cfg resource clone dlm clone-max=2 clone-node-max=1 -[root@pcmk-1 ~]# pcs -f dlm_cfg resource show - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 ] - Slaves: [ pcmk-2 ] - WebFS (ocf::heartbeat:Filesystem): Started pcmk-1 - Clone Set: dlm-clone [dlm] - Stopped: [ pcmk-1 pcmk-2 ] ----- - -Activate our new configuration, and see how the cluster responds: ----- -[root@pcmk-1 ~]# pcs cluster cib-push dlm_cfg --config -CIB updated -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-1 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Tue Sep 11 10:18:30 2018 -Last change: Tue Sep 11 10:16:49 2018 by hacluster via crmd on pcmk-2 - -2 nodes configured -8 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1 - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 ] - Slaves: [ pcmk-2 ] - WebFS (ocf::heartbeat:Filesystem): Started pcmk-1 - Clone Set: dlm-clone [dlm] - Started: [ pcmk-1 pcmk-2 ] - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -[[GFS2_prep]] -== Create and Populate GFS2 
Filesystem == - -Before we do anything to the existing partition, we need to make sure it -is unmounted. We do this by telling the cluster to stop the WebFS resource. -This will ensure that other resources (in our case, Apache) using WebFS -are not only stopped, but stopped in the correct order. - ----- -[root@pcmk-1 ~]# pcs resource disable WebFS -[root@pcmk-1 ~]# pcs resource - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Stopped - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 ] - Slaves: [ pcmk-2 ] - WebFS (ocf::heartbeat:Filesystem): Stopped (disabled) - Clone Set: dlm-clone [dlm] - Started: [ pcmk-1 pcmk-2 ] ----- - -You can see that both Apache and WebFS have been stopped, -and that *pcmk-1* is the current master for the DRBD device. - -Now we can create a new GFS2 filesystem on the DRBD device. - -[WARNING] -========= -This will erase all previous content stored on the DRBD device. Ensure -you have a copy of any important data. -========= - -[IMPORTANT] -=========== -Run the next command on whichever node has the DRBD Primary role. -Otherwise, you will receive the message: ------ -/dev/drbd1: Read-only file system ------ -=========== - ------ -[root@pcmk-1 ~]# mkfs.gfs2 -p lock_dlm -j 2 -t mycluster:web /dev/drbd1 -It appears to contain an existing filesystem (xfs) -This will destroy any data on /dev/drbd1 -Are you sure you want to proceed? 
[y/n] y -Discarding device contents (may take a while on large devices): Done -Adding journals: Done -Building resource groups: Done -Creating quota file: Done -Writing superblock and syncing: Done -Device: /dev/drbd1 -Block size: 4096 -Device size: 0.50 GB (131059 blocks) -Filesystem size: 0.50 GB (131056 blocks) -Journals: 2 -Resource groups: 3 -Locking protocol: "lock_dlm" -Lock table: "mycluster:web" -UUID: 0bcbffab-cada-4105-94d1-be8a26669ee0 ------ - -The `mkfs.gfs2` command required a number of additional parameters: - -* `-p lock_dlm` specifies that we want to use the -kernel's DLM. - -* `-j 2` indicates that the filesystem should reserve enough -space for two journals (one for each node that will access the filesystem). - -* `-t mycluster:web` specifies the lock table name. The format for -this field is +pass:[clustername:fsname]+. For -+pass:[clustername]+, we need to use the same -value we specified originally with `pcs cluster setup --name` (which is also -the value of *cluster_name* in +/etc/corosync/corosync.conf+). -If you are unsure what your cluster name is, you can look in -+/etc/corosync/corosync.conf+ or execute the command -`pcs cluster corosync pcmk-1 | grep cluster_name`. - -Now we can (re-)populate the new filesystem with data -(web pages). We'll create yet another variation on our home page. - ------ -[root@pcmk-1 ~]# mount /dev/drbd1 /mnt -[root@pcmk-1 ~]# cat <<-END >/mnt/index.html - -My Test Site - GFS2 - -END -[root@pcmk-1 ~]# chcon -R --reference=/var/www/html /mnt -[root@pcmk-1 ~]# umount /dev/drbd1 -[root@pcmk-1 ~]# drbdadm verify wwwdata ------ - -== Reconfigure the Cluster for GFS2 == - -With the WebFS resource stopped, let's update the configuration. 
- ----- -[root@pcmk-1 ~]# pcs resource show WebFS - Resource: WebFS (class=ocf provider=heartbeat type=Filesystem) - Attributes: device=/dev/drbd1 directory=/var/www/html fstype=xfs - Meta Attrs: target-role=Stopped - Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20) - notify interval=0s timeout=60 (WebFS-notify-interval-0s) - start interval=0s timeout=60 (WebFS-start-interval-0s) - stop interval=0s timeout=60 (WebFS-stop-interval-0s) ----- - -The fstype option needs to be updated to *gfs2* instead of *xfs*. - ----- -[root@pcmk-1 ~]# pcs resource update WebFS fstype=gfs2 -[root@pcmk-1 ~]# pcs resource show WebFS - Resource: WebFS (class=ocf provider=heartbeat type=Filesystem) - Attributes: device=/dev/drbd1 directory=/var/www/html fstype=gfs2 - Meta Attrs: target-role=Stopped - Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20) - notify interval=0s timeout=60 (WebFS-notify-interval-0s) - start interval=0s timeout=60 (WebFS-start-interval-0s) - stop interval=0s timeout=60 (WebFS-stop-interval-0s) ----- - -GFS2 requires that DLM be running, so we also need to set up new colocation -and ordering constraints for it: ----- -[root@pcmk-1 ~]# pcs constraint colocation add WebFS with dlm-clone INFINITY -[root@pcmk-1 ~]# pcs constraint order dlm-clone then WebFS -Adding dlm-clone WebFS (kind: Mandatory) (Options: first-action=start then-action=start) ----- - -== Clone the Filesystem Resource == - -Now that we have a cluster filesystem ready to go, we can configure the cluster -so both nodes mount the filesystem. - -Clone the filesystem resource in a new configuration. -Notice how pcs automatically updates the relevant constraints again. 
----- -[root@pcmk-1 ~]# pcs cluster cib active_cfg -[root@pcmk-1 ~]# pcs -f active_cfg resource clone WebFS -[root@pcmk-1 ~]# pcs -f active_cfg constraint -Location Constraints: -Ordering Constraints: - start ClusterIP then start WebSite (kind:Mandatory) - promote WebDataClone then start WebFS-clone (kind:Mandatory) - start WebFS-clone then start WebSite (kind:Mandatory) - start dlm-clone then start WebFS-clone (kind:Mandatory) -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) - WebFS-clone with WebDataClone (score:INFINITY) (with-rsc-role:Master) - WebSite with WebFS-clone (score:INFINITY) - WebFS-clone with dlm-clone (score:INFINITY) -Ticket Constraints: ----- - -Tell the cluster that it is now allowed to promote both instances to be DRBD -Primary (aka. master). - ------ -[root@pcmk-1 ~]# pcs -f active_cfg resource update WebDataClone master-max=2 ------ - -Finally, load our configuration to the cluster, and re-enable the WebFS resource -(which we disabled earlier). ------ -[root@pcmk-1 ~]# pcs cluster cib-push active_cfg --config -CIB updated -[root@pcmk-1 ~]# pcs resource enable WebFS ------ - -After all the processes are started, the status should look similar to this. ------ -[root@pcmk-1 ~]# pcs resource - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 pcmk-2 ] - Clone Set: dlm-clone [dlm] - Started: [ pcmk-1 pcmk-2 ] - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - Clone Set: WebFS-clone [WebFS] - Started: [ pcmk-1 pcmk-2 ] - WebSite (ocf::heartbeat:apache): Started pcmk-1 ------ - -== Test Failover == - -Testing failover is left as an exercise for the reader. - -With this configuration, the data is now active/active. The website -administrator could change HTML files on either node, and the live website will -show the changes even if it is running on the opposite node. 
- -If the web server is configured to listen on all IP addresses, it is possible -to remove the constraints between the WebSite and ClusterIP resources, and -clone the WebSite resource. The web server would always be ready to serve web -pages, and only the IP address would need to be moved in a failover. diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt deleted file mode 100644 index f75cb34e2b..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt +++ /dev/null @@ -1,268 +0,0 @@ -:compat-mode: legacy -= Create an Active/Passive Cluster = - -== Add a Resource == - -Our first resource will be a unique IP address that the cluster can bring up on -either node. Regardless of where any cluster service(s) are running, end -users need a consistent address to contact them on. Here, I will choose -192.168.122.120 as the floating address, give it the imaginative name ClusterIP -and tell the cluster to check whether it is running every 30 seconds. - -[WARNING] -=========== -The chosen address must not already be in use on the network. -Do not reuse an IP address one of the nodes already has configured. -=========== - ----- -[root@pcmk-1 ~]# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \ - ip=192.168.122.120 cidr_netmask=24 op monitor interval=30s ----- - -Another important piece of information here is *ocf:heartbeat:IPaddr2*. -This tells Pacemaker three things about the resource you want to add: - -* The first field (*ocf* in this case) is the standard to which the resource -script conforms and where to find it. - -* The second field (*heartbeat* in this case) is standard-specific; for OCF -resources, it tells the cluster which OCF namespace the resource script is in. - -* The third field (*IPaddr2* in this case) is the name of the resource script. 
- -To obtain a list of the available resource standards (the *ocf* part of -*ocf:heartbeat:IPaddr2*), run: - ----- -[root@pcmk-1 ~]# pcs resource standards -lsb -ocf -service -systemd ----- - -To obtain a list of the available OCF resource providers (the *heartbeat* -part of *ocf:heartbeat:IPaddr2*), run: - ----- -[root@pcmk-1 ~]# pcs resource providers -heartbeat -openstack -pacemaker ----- - -Finally, if you want to see all the resource agents available for -a specific OCF provider (the *IPaddr2* part of *ocf:heartbeat:IPaddr2*), run: - ----- -[root@pcmk-1 ~]# pcs resource agents ocf:heartbeat -apache -aws-vpc-move-ip -awseip -awsvip -azure-lb -clvm -. -. (skipping lots of resources to save space) -. -symlink -tomcat -VirtualDomain -Xinetd ----- - -Now, verify that the IP resource has been added, and display the cluster's -status to see that it is now active: - ----- -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 16:55:26 2018 -Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -1 resource configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -== Perform a Failover == - -Since our ultimate goal is high availability, we should test failover of -our new resource before moving on. - -First, find the node on which the IP address is running. 
- ----- -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 16:55:26 2018 -Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -1 resource configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 ----- - -You can see that the status of the *ClusterIP* resource -is *Started* on a particular node (in this example, *pcmk-1*). -Shut down Pacemaker and Corosync on that machine to trigger a failover. - ----- -[root@pcmk-1 ~]# pcs cluster stop pcmk-1 -Stopping Cluster (pacemaker)... -Stopping Cluster (corosync)... ----- - -[NOTE] -====== -A cluster command such as +pcs cluster stop pass:[nodename]+ can be run -from any node in the cluster, not just the affected node. -====== - -Verify that pacemaker and corosync are no longer running: ----- -[root@pcmk-1 ~]# pcs status -Error: cluster is not currently running on this node ----- - -Go to the other node, and check the cluster status. - ----- -[root@pcmk-2 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 16:57:22 2018 -Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -1 resource configured - -Online: [ pcmk-2 ] -OFFLINE: [ pcmk-1 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -Notice that *pcmk-1* is *OFFLINE* for cluster purposes (its *pcsd* is still -active, allowing it to receive `pcs` commands, but it is not participating in -the cluster). - -Also notice that *ClusterIP* is now running on *pcmk-2* -- failover happened -automatically, and no errors are reported. 
- -[IMPORTANT] -.Quorum -==== -If a cluster splits into two (or more) groups of nodes that can no longer -communicate with each other (aka. _partitions_), _quorum_ is used to prevent -resources from starting on more nodes than desired, which would risk -data corruption. - -A cluster has quorum when more than half of all known nodes are online in -the same partition, or for the mathematically inclined, whenever the following -equation is true: -.... -total_nodes < 2 * active_nodes -.... - -For example, if a 5-node cluster split into 3- and 2-node paritions, -the 3-node partition would have quorum and could continue serving resources. -If a 6-node cluster split into two 3-node partitions, neither partition -would have quorum; pacemaker's default behavior in such cases is to -stop all resources, in order to prevent data corruption. - -Two-node clusters are a special case. By the above definition, -a two-node cluster would only have quorum when both nodes are -running. This would make the creation of a two-node cluster pointless, -but corosync has the ability to treat two-node clusters as if only one node -is required for quorum. - -The `pcs cluster setup` command will automatically configure *two_node: 1* -in +corosync.conf+, so a two-node cluster will "just work". - -If you are using a different cluster shell, you will have to configure -+corosync.conf+ appropriately yourself. -==== - -Now, simulate node recovery by restarting the cluster stack on *pcmk-1*, and -check the cluster's status. (It may take a little while before the cluster -gets going on the node, but it eventually will look like the below.) - ----- -[root@pcmk-1 ~]# pcs cluster start pcmk-1 -pcmk-1: Starting Cluster... 
-[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 17:00:04 2018 -Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -1 resource configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -== Prevent Resources from Moving after Recovery == - -In most circumstances, it is highly desirable to prevent healthy -resources from being moved around the cluster. Moving resources almost -always requires a period of downtime. For complex services such as -databases, this period can be quite long. - -To address this, Pacemaker has the concept of resource _stickiness_, -which controls how strongly a service prefers to stay running where it -is. You may like to think of it as the "cost" of any downtime. By -default, Pacemaker assumes there is zero cost associated with moving -resources and will do so to achieve "optimal" -footnote:[Pacemaker's definition of optimal may not always agree with that of a -human's. The order in which Pacemaker processes lists of resources and nodes -creates implicit preferences in situations where the administrator has not -explicitly specified them.] -resource placement. We can specify a different stickiness for every -resource, but it is often sufficient to change the default. 
- ----- -[root@pcmk-1 ~]# pcs resource defaults resource-stickiness=100 -Warning: Defaults do not apply to resources which override them with their own defined values -[root@pcmk-1 ~]# pcs resource defaults -resource-stickiness: 100 ----- diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt deleted file mode 100644 index efa2c763b5..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt +++ /dev/null @@ -1,424 +0,0 @@ -:compat-mode: legacy -= Add Apache HTTP Server as a Cluster Service = - -indexterm:[Apache HTTP Server] - -Now that we have a basic but functional active/passive two-node cluster, -we're ready to add some real services. We're going to start with -Apache HTTP Server because it is a feature of many clusters and relatively -simple to configure. - -== Install Apache == - -Before continuing, we need to make sure Apache is installed on both -hosts. We also need the wget tool in order for the cluster to be able to check -the status of the Apache server. - ----- -# yum install -y httpd wget -# firewall-cmd --permanent --add-service=http -# firewall-cmd --reload ----- - -[IMPORTANT] -==== -Do *not* enable the httpd service. Services that are intended to -be managed via the cluster software should never be managed by the OS. -It is often useful, however, to manually start the service, verify that -it works, then stop it again, before adding it to the cluster. This -allows you to resolve any non-cluster-related problems before continuing. -Since this is a simple example, we'll skip that step here. -==== - -== Create Website Documents == - -We need to create a page for Apache to serve. On &DISTRO; &DISTRO_VERSION;, the -default Apache document root is /var/www/html, so we'll create an index file -there. 
For the moment, we will simplify things by serving a static site -and manually synchronizing the data between the two nodes, so run this command -on both nodes: - ------ -# cat <<-END >/var/www/html/index.html - - My Test Site - $(hostname) - -END ------ - -== Enable the Apache status URL == - -indexterm:[Apache HTTP Server,/server-status] - -In order to monitor the health of your Apache instance, and recover it if -it fails, the resource agent used by Pacemaker assumes the server-status -URL is available. On both nodes, enable the URL with: - ----- -# cat <<-END >/etc/httpd/conf.d/status.conf - - SetHandler server-status - Require local - -END ----- - -[NOTE] -====== -If you are using a different operating system, server-status may already be -enabled or may be configurable in a different location. If you are using -a version of Apache HTTP Server less than 2.4, the syntax will be different. -====== - -== Configure the Cluster == - -indexterm:[Apache HTTP Server,Apache resource configuration] - -At this point, Apache is ready to go, and all that needs to be done is to -add it to the cluster. Let's call the resource WebSite. We need to use -an OCF resource script called apache in the heartbeat namespace. -footnote:[Compare the key used here, *ocf:heartbeat:apache*, with the one we -used earlier for the IP address, *ocf:heartbeat:IPaddr2*] -The script's only required parameter is the path to the main Apache -configuration file, and we'll tell the cluster to check once a -minute that Apache is still running. - ----- -[root@pcmk-1 ~]# pcs resource create WebSite ocf:heartbeat:apache \ - configfile=/etc/httpd/conf/httpd.conf \ - statusurl="http://localhost/server-status" \ - op monitor interval=1min ----- - -By default, the operation timeout for all resources' start, stop, and monitor -operations is 20 seconds. In many cases, this timeout period is less than -a particular resource's advised timeout period. 
For the purposes of this -tutorial, we will adjust the global operation timeout default to 240 seconds. - ----- -[root@pcmk-1 ~]# pcs resource op defaults timeout=240s -Warning: Defaults do not apply to resources which override them with their own defined values -[root@pcmk-1 ~]# pcs resource op defaults -timeout: 240s ----- - -[NOTE] -====== -In a production cluster, it is usually better to adjust each resource's -start, stop, and monitor timeouts to values that are appropriate to -the behavior observed in your environment, rather than adjust -the global default. -====== - -After a short delay, we should see the cluster start Apache. - ------ -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 17:06:22 2018 -Last change: Mon Sep 10 17:05:41 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -2 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ------ - -Wait a moment, the WebSite resource isn't running on the same host as our -IP address! - -[NOTE] -====== -If, in the `pcs status` output, you see the WebSite resource has -failed to start, then you've likely not enabled the status URL correctly. -You can check whether this is the problem by running: - -.... -wget -O - http://localhost/server-status -.... - -If you see *Not Found* or *Forbidden* in the output, then this is likely the -problem. Ensure that the ** block is correct. - -====== - -== Ensure Resources Run on the Same Host == - -To reduce the load on any one machine, Pacemaker will generally try to -spread the configured resources across the cluster nodes. 
However, we -can tell the cluster that two resources are related and need to run on -the same host (or not at all). Here, we instruct the cluster that -WebSite can only run on the host that ClusterIP is active on. - -To achieve this, we use a _colocation constraint_ that indicates it is -mandatory for WebSite to run on the same node as ClusterIP. The -"mandatory" part of the colocation constraint is indicated by using a -score of INFINITY. The INFINITY score also means that if ClusterIP is not -active anywhere, WebSite will not be permitted to run. - -[NOTE] -======= -If ClusterIP is not active anywhere, WebSite will not be permitted to run -anywhere. -======= - -[IMPORTANT] -=========== -Colocation constraints are "directional", in that they imply certain -things about the order in which the two resources will have a location -chosen. In this case, we're saying that *WebSite* needs to be placed on the -same machine as *ClusterIP*, which implies that the cluster must know the -location of *ClusterIP* before choosing a location for *WebSite*. 
-=========== - ------ -[root@pcmk-1 ~]# pcs constraint colocation add WebSite with ClusterIP INFINITY -[root@pcmk-1 ~]# pcs constraint -Location Constraints: -Ordering Constraints: -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) -Ticket Constraints: -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 17:08:54 2018 -Last change: Mon Sep 10 17:08:27 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -2 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - WebSite (ocf::heartbeat:apache): Started pcmk-2 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ------ - -== Ensure Resources Start and Stop in Order == - -Like many services, Apache can be configured to bind to specific -IP addresses on a host or to the wildcard IP address. If Apache -binds to the wildcard, it doesn't matter whether an IP address -is added before or after Apache starts; Apache will respond on -that IP just the same. However, if Apache binds only to certain IP -address(es), the order matters: If the address is added after Apache -starts, Apache won't respond on that address. - -To be sure our WebSite responds regardless of Apache's address configuration, -we need to make sure ClusterIP not only runs on the same node, -but starts before WebSite. A colocation constraint only ensures the -resources run together, not the order in which they are started and stopped. - -We do this by adding an ordering constraint. By default, all order constraints -are mandatory, which means that the recovery of ClusterIP will also trigger the -recovery of WebSite. 
- ------ -[root@pcmk-1 ~]# pcs constraint order ClusterIP then WebSite -Adding ClusterIP WebSite (kind: Mandatory) (Options: first-action=start then-action=start) -[root@pcmk-1 ~]# pcs constraint -Location Constraints: -Ordering Constraints: - start ClusterIP then start WebSite (kind:Mandatory) -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) -Ticket Constraints: ------ - -== Prefer One Node Over Another == - -Pacemaker does not rely on any sort of hardware symmetry between nodes, -so it may well be that one machine is more powerful than the other. - -In such cases, you may want to host the resources on the more powerful node -when it is available, to have the best performance -- or you may want to host -the resources on the _less_ powerful node when it's available, so you don't -have to worry about whether you can handle the load after a failover. - -To do this, we create a location constraint. - -In the location constraint below, we are saying the WebSite resource -prefers the node pcmk-1 with a score of 50. Here, the score indicates -how strongly we'd like the resource to run at this location. 
- ------ -[root@pcmk-1 ~]# pcs constraint location WebSite prefers pcmk-1=50 -[root@pcmk-1 ~]# pcs constraint -Location Constraints: - Resource: WebSite - Enabled on: pcmk-1 (score:50) -Ordering Constraints: - start ClusterIP then start WebSite (kind:Mandatory) -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) -Ticket Constraints: -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 17:21:41 2018 -Last change: Mon Sep 10 17:21:14 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -2 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - WebSite (ocf::heartbeat:apache): Started pcmk-2 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ------ - -Wait a minute, the resources are still on pcmk-2! - -Even though WebSite now prefers to run on pcmk-1, that preference is -(intentionally) less than the resource stickiness (how much we -preferred not to have unnecessary downtime). - -To see the current placement scores, you can use a tool called crm_simulate. - ----- -[root@pcmk-1 ~]# crm_simulate -sL - -Current cluster status: -Online: [ pcmk-1 pcmk-2 ] - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - WebSite (ocf::heartbeat:apache): Started pcmk-2 - -Allocation scores: -native_color: ClusterIP allocation score on pcmk-1: 50 -native_color: ClusterIP allocation score on pcmk-2: 200 -native_color: WebSite allocation score on pcmk-1: -INFINITY -native_color: WebSite allocation score on pcmk-2: 100 - -Transition Summary: ----- - -== Move Resources Manually == - -There are always times when an administrator needs to override the -cluster and force resources to move to a specific location. In this example, -we will force the WebSite to move to pcmk-1. 
We will use the *pcs resource move* command to create a temporary constraint
-with a score of INFINITY. While we could update our existing constraint,
-using *move* allows us to easily get rid of the temporary constraint later.
-If desired, we could even give a lifetime for the constraint, so it would
-expire automatically -- but we don't do that in this example.
------ -[root@pcmk-1 ~]# pcs resource clear WebSite -[root@pcmk-1 ~]# pcs constraint -Location Constraints: - Resource: WebSite - Enabled on: pcmk-1 (score:50) -Ordering Constraints: - start ClusterIP then start WebSite (kind:Mandatory) -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) -Ticket Constraints: ------ - -Note that the INFINITY location constraint is now gone. If we check the cluster -status, we can also see that (as expected) the resources are still active -on pcmk-1. - ------ -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 17:31:47 2018 -Last change: Mon Sep 10 17:31:04 2018 by root via crm_resource on pcmk-1 - -2 nodes configured -2 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ------ - -To remove the constraint with the score of 50, we would first get the -constraint's ID using *pcs constraint --full*, then remove it with -*pcs constraint remove* and the ID. We won't show those steps here, -but feel free to try it on your own, with the help of the pcs man page -if necessary. diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Cluster-Setup.txt b/doc/Clusters_from_Scratch/en-US/Ch-Cluster-Setup.txt deleted file mode 100644 index 9cf6fbcce0..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Cluster-Setup.txt +++ /dev/null @@ -1,333 +0,0 @@ -:compat-mode: legacy -= Set up a Cluster = - -== Simplify Administration With a Cluster Shell == - -In the dark past, configuring Pacemaker required the administrator to -read and write XML. In true UNIX style, there were also a number of -different commands that specialized in different aspects of querying -and updating the cluster. 
- -In addition, the various components of the cluster stack (corosync, pacemaker, -etc.) had to be configured separately, with different configuration tools and -formats. - -All of that has been greatly simplified with the creation of higher-level tools, -whether command-line or GUIs, that hide all the mess underneath. - -Command-line cluster shells take all the individual aspects required for -managing and configuring a cluster, and pack them into one simple-to-use -command-line tool. - -They even allow you to queue up several changes at once and commit -them all at once. - -Two popular command-line shells are `pcs` and `crmsh`. Clusters from Scratch is -based on `pcs` because it comes with CentOS, but both have similar -functionality. Choosing a shell or GUI is a matter of personal preference and -what comes with (and perhaps is supported by) your choice of operating system. - - -== Install the Cluster Software == - -Fire up a shell on both nodes and run the following to install pacemaker, pcs, -and some other command-line tools that will make our lives easier: ----- -# yum install -y pacemaker pcs psmisc policycoreutils-python ----- - -[IMPORTANT] -=========== -This document will show commands that need to be executed on both nodes -with a simple `#` prompt. Be sure to run them on each node individually. -=========== - -[NOTE] -=========== -This document uses `pcs` for cluster management. Other alternatives, -such as `crmsh`, are available, but their syntax -will differ from the examples used here. 
-=========== - -== Configure the Cluster Software == - -=== Allow cluster services through firewall === - -On each node, allow cluster-related services through the local firewall: ----- -# firewall-cmd --permanent --add-service=high-availability -success -# firewall-cmd --reload -success ----- - -[NOTE] -====== -If you are using iptables directly, or some other firewall solution besides -firewalld, simply open the following ports, which can be used by various -clustering components: TCP ports 2224, 3121, and 21064, and UDP port 5405. - -If you run into any problems during testing, you might want to disable -the firewall and SELinux entirely until you have everything working. -This may create significant security issues and should not be performed on -machines that will be exposed to the outside world, but may be appropriate -during development and testing on a protected host. - -To disable security measures: ----- -[root@pcmk-1 ~]# setenforce 0 -[root@pcmk-1 ~]# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config -[root@pcmk-1 ~]# systemctl mask firewalld.service -[root@pcmk-1 ~]# systemctl stop firewalld.service -[root@pcmk-1 ~]# iptables --flush ----- -====== - -=== Enable pcs Daemon === - -Before the cluster can be configured, the pcs daemon must be started and enabled -to start at boot time on each node. This daemon works with the pcs command-line interface -to manage synchronizing the corosync configuration across all nodes in the cluster. - -Start and enable the daemon by issuing the following commands on each node: - ----- -# systemctl start pcsd.service -# systemctl enable pcsd.service -Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service. ----- - -The installed packages will create a *hacluster* user with a disabled password. 
-While this is fine for running `pcs` commands locally, -the account needs a login password in order to perform such tasks as syncing -the corosync configuration, or starting and stopping the cluster on other nodes. - -This tutorial will make use of such commands, -so now we will set a password for the *hacluster* user, using the same password -on both nodes: - ----- -# passwd hacluster -Changing password for user hacluster. -New password: -Retype new password: -passwd: all authentication tokens updated successfully. ----- - -[NOTE] -=========== -Alternatively, to script this process or set the password on a -different machine from the one you're logged into, you can use -the `--stdin` option for `passwd`: - ----- -[root@pcmk-1 ~]# ssh pcmk-2 -- 'echo mysupersecretpassword | passwd --stdin hacluster' ----- -=========== - -=== Configure Corosync === - -On either node, use `pcs cluster auth` to authenticate as the *hacluster* user: - ----- -[root@pcmk-1 ~]# pcs cluster auth pcmk-1 pcmk-2 -Username: hacluster -Password: -pcmk-2: Authorized -pcmk-1: Authorized ----- - -.Note -[NOTE] -==== -In Fedora 29 and CentOS 8.0, the command has been changed to `pcs host auth`: ----- -[root@pcmk-1 ~]# pcs host auth pcmk-1 pcmk-2 -Username: hacluster -Password: -pcmk-2: Authorized -pcmk-1: Authorized ----- -==== - -Next, use `pcs cluster setup` on the same node to generate and synchronize the -corosync configuration: ----- -[root@pcmk-1 ~]# pcs cluster setup --name mycluster pcmk-1 pcmk-2 -Destroying cluster on nodes: pcmk-1, pcmk-2... -pcmk-2: Stopping Cluster (pacemaker)... -pcmk-1: Stopping Cluster (pacemaker)... -pcmk-1: Successfully destroyed cluster -pcmk-2: Successfully destroyed cluster - -Sending 'pacemaker_remote authkey' to 'pcmk-1', 'pcmk-2' -pcmk-2: successful distribution of the file 'pacemaker_remote authkey' -pcmk-1: successful distribution of the file 'pacemaker_remote authkey' -Sending cluster config files to the nodes... 
-pcmk-1: Succeeded -pcmk-2: Succeeded - -Synchronizing pcsd certificates on nodes pcmk-1, pcmk-2... -pcmk-2: Success -pcmk-1: Success -Restarting pcsd on the nodes in order to reload the certificates... -pcmk-2: Success -pcmk-1: Success ----- - -.Note -[NOTE] -==== -In Fedora 29 and CentOS 8.0, the syntax has been changed and the +--name+ option -has been dropped: ----- -[root@pcmk-1 ~]# pcs cluster setup mycluster pcmk-1 pcmk-2 -No addresses specified for host 'pcmk-1', using 'pcmk-1' -No addresses specified for host 'pcmk-2', using 'pcmk-2' -Destroying cluster on hosts: 'pcmk-1', 'pcmk-2'... -pcmk-1: Successfully destroyed cluster -pcmk-2: Successfully destroyed cluster -Requesting remove 'pcsd settings' from 'pcmk-1', 'pcmk-2' -pcmk-1: successful removal of the file 'pcsd settings' -pcmk-2: successful removal of the file 'pcsd settings' -Sending 'corosync authkey', 'pacemaker authkey' to 'pcmk-1', 'pcmk-2' -pcmk-2: successful distribution of the file 'corosync authkey' -pcmk-2: successful distribution of the file 'pacemaker authkey' -pcmk-1: successful distribution of the file 'corosync authkey' -pcmk-1: successful distribution of the file 'pacemaker authkey' -Synchronizing pcsd SSL certificates on nodes 'pcmk-1', 'pcmk-2'... -pcmk-1: Success -pcmk-2: Success -Sending 'corosync.conf' to 'pcmk-1', 'pcmk-2' -pcmk-2: successful distribution of the file 'corosync.conf' -pcmk-1: successful distribution of the file 'corosync.conf' -Cluster has been successfully set up. ----- -==== - -If you received an authorization error for either of those commands, make -sure you configured the *hacluster* user account on each node -with the same password. - -[NOTE] -====== -If you are not using `pcs` for cluster administration, -follow whatever procedures are appropriate for your tools -to create a corosync.conf and copy it to all nodes. 
- -The `pcs` command will configure corosync to use UDP unicast transport; if you -choose to use multicast instead, choose a multicast address carefully. -footnote:[For some subtle issues, see -http://web.archive.org/web/20101211210054/http://29west.com/docs/THPM/multicast-address-assignment.html[Topics -in High-Performance Messaging: Multicast Address Assignment] or the more detailed treatment in -https://www.cisco.com/c/dam/en/us/support/docs/ip/ip-multicast/ipmlt_wp.pdf[Cisco's -Guidelines for Enterprise IP Multicast Address Allocation].] -====== - -The final corosync.conf configuration on each node should look -something like the sample in <>. - -== Explore pcs == - -Start by taking some time to familiarize yourself with what `pcs` can do. - ----- -[root@pcmk-1 ~]# pcs - -Usage: pcs [-f file] [-h] [commands]... -Control and configure pacemaker and corosync. - -Options: - -h, --help Display usage and exit. - -f file Perform actions on file instead of active CIB. - --debug Print all network traffic and external commands run. - --version Print pcs version information. List pcs capabilities if - --full is specified. - --request-timeout Timeout for each outgoing request to another node in - seconds. Default is 60s. - --force Override checks and errors, the exact behavior depends on - the command. WARNING: Using the --force option is - strongly discouraged unless you know what you are doing. - -Commands: - cluster Configure cluster options and nodes. - resource Manage cluster resources. - stonith Manage fence devices. - constraint Manage resource constraints. - property Manage pacemaker properties. - acl Manage pacemaker access control lists. - qdevice Manage quorum device provider on the local host. - quorum Manage cluster quorum settings. - booth Manage booth (cluster ticket manager). - status View cluster status. - config View and manage cluster configuration. - pcsd Manage pcs daemon. - node Manage cluster nodes. - alert Manage pacemaker alerts. 
- ----- - -As you can see, the different aspects of cluster management are separated -into categories. To discover the functionality available in each of these -categories, one can issue the command +pcs pass:[category] help+. Below -is an example of all the options available under the status category. - ----- -[root@pcmk-1 ~]# pcs status help - -Usage: pcs status [commands]... -View current cluster and resource status -Commands: - [status] [--full | --hide-inactive] - View all information about the cluster and resources (--full provides - more details, --hide-inactive hides inactive resources). - - resources [ | --full | --groups | --hide-inactive] - Show all currently configured resources or if a resource is specified - show the options for the configured resource. If --full is specified, - all configured resource options will be displayed. If --groups is - specified, only show groups (and their resources). If --hide-inactive - is specified, only show active resources. - - groups - View currently configured groups and their resources. - - cluster - View current cluster status. - - corosync - View current membership information as seen by corosync. - - quorum - View current quorum status. - - qdevice [--full] [] - Show runtime status of specified model of quorum device provider. Using - --full will give more detailed output. If is specified, - only information about the specified cluster will be displayed. - - nodes [corosync | both | config] - View current status of nodes from pacemaker. If 'corosync' is - specified, view current status of nodes from corosync instead. If - 'both' is specified, view current status of nodes from both corosync & - pacemaker. If 'config' is specified, print nodes from corosync & - pacemaker configuration. - - pcsd []... - Show current status of pcsd on nodes specified, or on all nodes - configured in the local cluster if no nodes are specified. - - xml - View xml version of status (output from crm_mon -r -1 -X). 
- ----- - -Additionally, if you are interested in the version and -supported cluster stack(s) available with your Pacemaker -installation, run: - ----- -[root@pcmk-1 ~]# pacemakerd --features -Pacemaker 1.1.18-11.el7_5.3 (Build: 2b07d5c5a9) - Supporting v3.0.14: generated-manpages agent-manpages ncurses libqb-logging libqb-ipc systemd nagios corosync-native atomic-attrd acls ----- diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Fencing.txt b/doc/Clusters_from_Scratch/en-US/Ch-Fencing.txt deleted file mode 100644 index 6987c69460..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Fencing.txt +++ /dev/null @@ -1,210 +0,0 @@ -:compat-mode: legacy -= Configure Fencing = - -== What is Fencing? == - -Fencing protects your data from being corrupted, and your application from -becoming unavailable, due to unintended concurrent access by rogue nodes. - -Just because a node is unresponsive doesn't mean it has stopped -accessing your data. The only way to be 100% sure that your data is -safe, is to use fencing to ensure that the node is truly -offline before allowing the data to be accessed from another node. - -Fencing also has a role to play in the event that a clustered service -cannot be stopped. In this case, the cluster uses fencing to force the -whole node offline, thereby making it safe to start the service -elsewhere. - -Fencing is also known as STONITH, an acronym for "Shoot The Other Node In The -Head", since the most popular form of fencing is cutting a host's power. - -In order to guarantee the safety of your data, -footnote:[If the data is corrupt, there is little point in continuing to make it available] -fencing is enabled by default. - -[NOTE] -==== -It is possible to tell the cluster not to use fencing, by setting the -*stonith-enabled* cluster option to false: ----- -[root@pcmk-1 ~]# pcs property set stonith-enabled=false -[root@pcmk-1 ~]# crm_verify -L ----- - -However, this is completely inappropriate for a production cluster. 
It tells -the cluster to simply pretend that failed nodes are safely powered off. Some -vendors will refuse to support clusters that have fencing disabled. Even -disabling it for a test cluster means you won't be able to test real failure -scenarios. -==== - -== Choose a Fence Device == - -The two broad categories of fence device are power fencing, which cuts off -power to the target, and fabric fencing, which cuts off the target's access to -some critical resource, such as a shared disk or access to the local network. - -Power fencing devices include: - -* Intelligent power switches -* IPMI -* Hardware watchdog device (alone, or in combination with shared storage used - as a "poison pill" mechanism) - -Fabric fencing devices include: - -* Shared storage that can be cut off for a target host by another host (for - example, an external storage device that supports SCSI-3 persistent - reservations) -* Intelligent network switches - -Using IPMI as a power fencing device may seem like a good choice. However, -if the IPMI shares power and/or network access with the host (such as most -onboard IPMI controllers), a power or network failure will cause both the -host and its fencing device to fail. The cluster will be unable to recover, -and must stop all resources to avoid a possible split-brain situation. - -Likewise, any device that relies on the machine being active (such as -SSH-based "devices" sometimes used during testing) is inappropriate, -because fencing will be required when the node is completely unresponsive. - -== Configure the Cluster for Fencing == - -. Install the fence agent(s). To see what packages are available, run `yum - search fence-`. Be sure to install the package(s) on all cluster nodes. - -. Configure the fence device itself to be able to fence your nodes and accept - fencing requests. This includes any necessary configuration on the device and - on the nodes, and any firewall or SELinux changes needed. 
Test the - communication between the device and your nodes. - -. Find the name of the correct fence agent: `pcs stonith list` - -. Find the parameters associated with the device: - +pcs stonith describe pass:[agent_name]+ - -. Create a local copy of the CIB: `pcs cluster cib stonith_cfg` - -. Create the fencing resource: +pcs -f stonith_cfg stonith create pass:[stonith_id - stonith_device_type [stonith_device_options]]+ -+ -Any flags that do not take arguments, such as +--ssl+, should be passed as +ssl=1+. - -. Enable fencing in the cluster: `pcs -f stonith_cfg property set stonith-enabled=true` - -. If the device does not know how to fence nodes based on their cluster node - name, you may also need to set the special *pcmk_host_map* parameter. See - `man pacemaker-fenced` for details. - -. If the device does not support the *list* command, you may also need - to set the special *pcmk_host_list* and/or *pcmk_host_check* - parameters. See `man pacemaker-fenced` for details. - -. If the device does not expect the victim to be specified with the - *port* parameter, you may also need to set the special - *pcmk_host_argument* parameter. See `man pacemaker-fenced` for details. - -. Commit the new configuration: `pcs cluster cib-push stonith_cfg` - -. Once the fence device resource is running, test it (you might want to stop - the cluster on that machine first): - +stonith_admin --reboot pass:[nodename]+ - -== Example == - -For this example, assume we have a chassis containing four nodes -and a separately powered IPMI device active on 10.0.0.1. Following the steps -above would go something like this: - -Step 1: Install the *fence-agents-ipmilan* package on both nodes. - -Step 2: Configure the IP address, authentication credentials, etc. in the IPMI device itself. - -Step 3: Choose the *fence_ipmilan* STONITH agent. 
- -Step 4: Obtain the agent's possible parameters: ----- -[root@pcmk-1 ~]# pcs stonith describe fence_ipmilan -fence_ipmilan - Fence agent for IPMI - -fence_ipmilan is an I/O Fencing agentwhich can be used with machines controlled by IPMI.This agent calls support software ipmitool (http://ipmitool.sf.net/). WARNING! This fence agent might report success before the node is powered off. You should use -m/method onoff if your fence device works correctly with that option. - -Stonith options: - ipport: TCP/UDP port to use for connection with device - hexadecimal_kg: Hexadecimal-encoded Kg key for IPMIv2 authentication - port: IP address or hostname of fencing device (together with --port-as-ip) - inet6_only: Forces agent to use IPv6 addresses only - ipaddr: IP Address or Hostname - passwd_script: Script to retrieve password - method: Method to fence (onoff|cycle) - inet4_only: Forces agent to use IPv4 addresses only - passwd: Login password or passphrase - lanplus: Use Lanplus to improve security of connection - auth: IPMI Lan Auth type. - cipher: Ciphersuite to use (same as ipmitool -C parameter) - target: Bridge IPMI requests to the remote target address - privlvl: Privilege level on IPMI device - timeout: Timeout (sec) for IPMI operation - login: Login Name - verbose: Verbose mode - debug: Write debug information to given file - power_wait: Wait X seconds after issuing ON/OFF - login_timeout: Wait X seconds for cmd prompt after login - delay: Wait X seconds before fencing is started - power_timeout: Test X seconds for status change after ON/OFF - ipmitool_path: Path to ipmitool binary - shell_timeout: Wait X seconds for cmd prompt after issuing command - port_as_ip: Make "port/plug" to be an alias to IP address - retry_on: Count of attempts to retry power on - sudo: Use sudo (without password) when calling 3rd party sotfware. - priority: The priority of the stonith resource. Devices are tried in order of highest priority to lowest. 
- pcmk_host_map: A mapping of host names to ports numbers for devices that do not support host names. Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and - 3 for node2 - pcmk_host_list: A list of machines controlled by this device (Optional unless pcmk_host_check=static-list). - pcmk_host_check: How to determine which machines are controlled by the device. Allowed values: dynamic-list (query the device), static-list (check the pcmk_host_list attribute), none - (assume every device can fence every machine) - pcmk_delay_max: Enable a random delay for stonith actions and specify the maximum of random delay. This prevents double fencing when using slow devices such as sbd. Use this to enable a - random delay for stonith actions. The overall delay is derived from this random delay value adding a static delay so that the sum is kept below the maximum delay. - pcmk_delay_base: Enable a base delay for stonith actions and specify base delay value. This prevents double fencing when different delays are configured on the nodes. Use this to enable - a static delay for stonith actions. The overall delay is derived from a random delay value adding this static delay so that the sum is kept below the maximum delay. - pcmk_action_limit: The maximum number of actions can be performed in parallel on this device Pengine property concurrent-fencing=true needs to be configured first. Then use this to - specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited. 
- -Default operations: - monitor: interval=60s ----- - -Step 5: `pcs cluster cib stonith_cfg` - -Step 6: Here are example parameters for creating our fence device resource: ----- -[root@pcmk-1 ~]# pcs -f stonith_cfg stonith create ipmi-fencing fence_ipmilan \ - pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser \ - passwd=acd123 op monitor interval=60s -[root@pcmk-1 ~]# pcs -f stonith_cfg stonith - ipmi-fencing (stonith:fence_ipmilan): Stopped ----- - -Steps 7-10: Enable fencing in the cluster: ----- -[root@pcmk-1 ~]# pcs -f stonith_cfg property set stonith-enabled=true -[root@pcmk-1 ~]# pcs -f stonith_cfg property -Cluster Properties: - cluster-infrastructure: corosync - cluster-name: mycluster - dc-version: 1.1.18-11.el7_5.3-2b07d5c5a9 - have-watchdog: false - stonith-enabled: true ----- - -Step 11: `pcs cluster cib-push stonith_cfg --config` - -Step 12: Test: ----- -[root@pcmk-1 ~]# pcs cluster stop pcmk-2 -[root@pcmk-1 ~]# stonith_admin --reboot pcmk-2 ----- - -After a successful test, login to any rebooted nodes, and start the cluster -(with `pcs cluster start`). diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt deleted file mode 100644 index a91824ac32..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt +++ /dev/null @@ -1,358 +0,0 @@ -:compat-mode: legacy -= Installation = - -== Install &DISTRO; &DISTRO_VERSION; == - -=== Boot the Install Image === - -Download the 4GB -http://isoredirect.centos.org/centos/7/isos/x86_64/CentOS-7-x86_64-DVD-1804.iso[&DISTRO; -&DISTRO_VERSION; DVD ISO]. Use the image to boot a virtual machine, or -burn it to a DVD or USB drive and boot a physical server from that. - -After starting the installation, select your language and keyboard layout at -the welcome screen. 
- -.&DISTRO; &DISTRO_VERSION; Installation Welcome Screen -image::images/Welcome.png["Welcome to &DISTRO; &DISTRO_VERSION;",align="center",scaledwidth="100%"] - -=== Installation Options === - -At this point, you get a chance to tweak the default installation options. - -.&DISTRO; &DISTRO_VERSION; Installation Summary Screen -image::images/Installer.png["&DISTRO; &DISTRO_VERSION; Installation Summary",align="center",scaledwidth="100%"] - -Ignore the *SOFTWARE SELECTION* section (try saying that 10 times quickly). The -*Infrastructure Server* environment does have add-ons with much of the software -we need, but we will leave it as a *Minimal Install* here, so that we can see -exactly what software is required later. - -=== Configure Network === - -In the *NETWORK & HOSTNAME* section: - -- Edit *Host Name:* as desired. For this example, we will use - *pcmk-1.localdomain*. -- Select your network device, press *Configure...*, and manually assign a fixed - IP address. For this example, we'll use 192.168.122.101 under *IPv4 Settings* - (with an appropriate netmask, gateway and DNS server). -- Flip the switch to turn your network device on, and press *Done*. - -.&DISTRO; &DISTRO_VERSION; Network Interface Screen -image::images/Editing-eth0.png["&DISTRO; &DISTRO_VERSION; Editing eth0",align="center",scaledwidth="100%"] - -[IMPORTANT] -=========== -Do not accept the default network settings. -Cluster machines should never obtain an IP address via DHCP, because -DHCP's periodic address renewal will interfere with corosync. -=========== - -=== Configure Disk === - -By default, the installer's automatic partitioning will use LVM (which allows -us to dynamically change the amount of space allocated to a given partition). -However, it allocates all free space to the +/+ (aka. *root*) partition, which -cannot be reduced in size later (dynamic increases are fine). 
- -In order to follow the DRBD and GFS2 portions of this guide, we need to reserve -space on each machine for a replicated volume. - -Enter the *INSTALLATION DESTINATION* section, ensure the hard drive you want to -install to is selected, select *I will configure partitioning*, and press *Done*. - -In the *MANUAL PARTITIONING* screen that comes next, click the option to create -mountpoints automatically. Select the +/+ mountpoint, and reduce the desired -capacity by 1GiB or so. Select *Modify...* by the volume group name, and change -the *Size policy:* to *As large as possible*, to make the reclaimed space -available inside the LVM volume group. We'll add the additional volume later. - -.&DISTRO; &DISTRO_VERSION; Manual Partitioning Screen -image::images/Partitioning.png["&DISTRO; &DISTRO_VERSION; Partitioning",align="center",scaledwidth="100%"] - -Press *Done*, then *Accept changes*. - -=== Configure Time Synchronization === - -It is highly recommended to enable NTP on your cluster nodes. Doing so -ensures all nodes agree on the current time and makes reading log files -significantly easier. - -&DISTRO; will enable NTP automatically. If you want to change any time-related -settings (such as time zone or NTP server), you can do this in the -*TIME & DATE* section. - -=== Finish Install === - -Select *Begin Installation*. Once it completes, set a root password, and reboot -as instructed. For the purposes of this document, it is not necessary to create -any additional users. After the node reboots, you'll see a login prompt on -the console. Login using *root* and the password you created earlier. - -.&DISTRO; &DISTRO_VERSION; Console Prompt -image::images/Console.png["&DISTRO; &DISTRO_VERSION; Console",align="center",scaledwidth="100%"] - -[NOTE] -====== - -From here on, we're going to be working exclusively from the terminal. - -====== - -== Configure the OS == - -=== Verify Networking === - -Ensure that the machine has the static IP address you configured earlier. 
- ------ -[root@pcmk-1 ~]# ip addr -1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - valid_lft forever preferred_lft forever - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever -2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 52:54:00:8e:eb:41 brd ff:ff:ff:ff:ff:ff - inet 192.168.122.101/24 brd 192.168.122.255 scope global noprefixroute eth0 - valid_lft forever preferred_lft forever - inet6 fe80::e45:c99b:34c0:c657/64 scope link noprefixroute - valid_lft forever preferred_lft forever ------ - -[NOTE] -===== -If you ever need to change the node's IP address from the command line, follow -these instructions, replacing *${device}* with the name of your network device: - -.... -[root@pcmk-1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-${device} # manually edit as desired -[root@pcmk-1 ~]# nmcli dev disconnect ${device} -[root@pcmk-1 ~]# nmcli con reload ${device} -[root@pcmk-1 ~]# nmcli con up ${device} -.... - -This makes *NetworkManager* aware that a change was made on the config file. - -===== - -Next, ensure that the routes are as expected: - ------ -[root@pcmk-1 ~]# ip route -default via 192.168.122.1 dev eth0 proto static metric 100 -192.168.122.0/24 dev eth0 proto kernel scope link src 192.168.122.101 metric 100 ------ - -If there is no line beginning with *default via*, then you may need to add a line such as - -[source,Bash] -GATEWAY="192.168.122.1" - -to the device configuration using the same process as described above for -changing the IP address. - -Now, check for connectivity to the outside world. Start small by -testing whether we can reach the gateway we configured. - ------ -[root@pcmk-1 ~]# ping -c 1 192.168.122.1 -PING 192.168.122.1 (192.168.122.1) 56(84) bytes of data. 
-64 bytes from 192.168.122.1: icmp_seq=1 ttl=64 time=0.254 ms - ---- 192.168.122.1 ping statistics --- -1 packets transmitted, 1 received, 0% packet loss, time 0ms -rtt min/avg/max/mdev = 0.254/0.254/0.254/0.000 ms ------ - -Now try something external; choose a location you know should be available. - ------ -[root@pcmk-1 ~]# ping -c 1 www.clusterlabs.org -PING oss-uk-1.clusterlabs.org (109.74.197.241) 56(84) bytes of data. -64 bytes from oss-uk-1.clusterlabs.org (109.74.197.241): icmp_seq=1 ttl=49 time=333 ms - ---- oss-uk-1.clusterlabs.org ping statistics --- -1 packets transmitted, 1 received, 0% packet loss, time 0ms -rtt min/avg/max/mdev = 333.204/333.204/333.204/0.000 ms ------ - -=== Login Remotely === - -The console isn't a very friendly place to work from, so we will now -switch to accessing the machine remotely via SSH where we can -use copy and paste, etc. - -From another host, check whether we can see the new host at all: - ------ -beekhof@f16 ~ # ping -c 1 192.168.122.101 -PING 192.168.122.101 (192.168.122.101) 56(84) bytes of data. -64 bytes from 192.168.122.101: icmp_req=1 ttl=64 time=1.01 ms - ---- 192.168.122.101 ping statistics --- -1 packets transmitted, 1 received, 0% packet loss, time 0ms -rtt min/avg/max/mdev = 1.012/1.012/1.012/0.000 ms ------ - -Next, login as root via SSH. - ------ -beekhof@f16 ~ # ssh -l root 192.168.122.101 -The authenticity of host '192.168.122.101 (192.168.122.101)' can't be established. -ECDSA key fingerprint is 6e:b7:8f:e2:4c:94:43:54:a8:53:cc:20:0f:29:a4:e0. -Are you sure you want to continue connecting (yes/no)? yes -Warning: Permanently added '192.168.122.101' (ECDSA) to the list of known hosts. 
-root@192.168.122.101's password: -Last login: Tue Aug 11 13:14:39 2015 -[root@pcmk-1 ~]# ------ - -=== Apply Updates === - -Apply any package updates released since your installation image was created: ----- -[root@pcmk-1 ~]# yum update ----- - -=== Use Short Node Names === - -During installation, we filled in the machine's fully qualified domain -name (FQDN), which can be rather long when it appears in cluster logs and -status output. See for yourself how the machine identifies itself: -(((Nodes, short name))) - ----- -[root@pcmk-1 ~]# uname -n -pcmk-1.localdomain ----- -(((Nodes, Domain name (Query)))) - -We can use the `hostnamectl` tool to strip off the domain name: ----- -[root@pcmk-1 ~]# hostnamectl set-hostname $(uname -n | sed s/\\..*//) ----- -(((Nodes, Domain name (Remove from host name)))) - -Now, check that the machine is using the correct name: ----- -[root@pcmk-1 ~]# uname -n -pcmk-1 ----- - -You may want to reboot to ensure all updates take effect. - -== Repeat for Second Node == - -Repeat the Installation steps so far, so that you have two -nodes ready to have the cluster software installed. - -For the purposes of this document, the additional node is called -pcmk-2 with address 192.168.122.102. - -== Configure Communication Between Nodes == - -=== Configure Host Name Resolution === - -Confirm that you can communicate between the two new nodes: - ----- -[root@pcmk-1 ~]# ping -c 3 192.168.122.102 -PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data. -64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms -64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms -64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms - ---- 192.168.122.102 ping statistics --- -3 packets transmitted, 3 received, 0% packet loss, time 2000ms -rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms ----- - -Now we need to make sure we can communicate with the machines by their -name. 
If you have a DNS server, add additional entries for the two -machines. Otherwise, you'll need to add the machines to +/etc/hosts+ -on both nodes. Below are the entries for my cluster nodes: - ----- -[root@pcmk-1 ~]# grep pcmk /etc/hosts -192.168.122.101 pcmk-1.clusterlabs.org pcmk-1 -192.168.122.102 pcmk-2.clusterlabs.org pcmk-2 ----- - -We can now verify the setup by again using ping: - ----- -[root@pcmk-1 ~]# ping -c 3 pcmk-2 -PING pcmk-2.clusterlabs.org (192.168.122.101) 56(84) bytes of data. -64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=1 ttl=64 time=0.164 ms -64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=2 ttl=64 time=0.475 ms -64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=3 ttl=64 time=0.186 ms - ---- pcmk-2.clusterlabs.org ping statistics --- -3 packets transmitted, 3 received, 0% packet loss, time 2001ms -rtt min/avg/max/mdev = 0.164/0.275/0.475/0.141 ms ----- - -=== Configure SSH === - -SSH is a convenient and secure way to copy files and perform commands -remotely. For the purposes of this guide, we will create a key without a -password (using the -N option) so that we can perform remote actions -without being prompted. - -(((SSH))) - -[WARNING] -========= -Unprotected SSH keys (those without a password) are not recommended for servers exposed to the outside world. -We use them here only to simplify the demo. -========= - -Create a new key and allow anyone with that key to log in: - -.Creating and Activating a new SSH Key ----- -[root@pcmk-1 ~]# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N "" -Generating public/private dsa key pair. -Your identification has been saved in /root/.ssh/id_dsa. -Your public key has been saved in /root/.ssh/id_dsa.pub. -The key fingerprint is: -91:09:5c:82:5a:6a:50:08:4e:b2:0c:62:de:cc:74:44 root@pcmk-1.clusterlabs.org -The key's randomart image is: -+--[ DSA 1024]----+ -|==.ooEo.. | -|X O + .o o | -| * A + | -| + . | -| . 
S | -| | -| | -| | -| | -+-----------------+ -[root@pcmk-1 ~]# cp ~/.ssh/id_dsa.pub ~/.ssh/authorized_keys ----- -(((Creating and Activating a new SSH Key))) - -Install the key on the other node: ----- -[root@pcmk-1 ~]# scp -r ~/.ssh pcmk-2: -The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established. -ECDSA key fingerprint is SHA256:63xNPkPYq98rYznf3T9QYJAzlaGiAsSgFVNHOZjPWqc. -ECDSA key fingerprint is MD5:d9:bf:6e:32:88:be:47:3d:96:f1:96:27:65:05:0b:c3. -Are you sure you want to continue connecting (yes/no)? yes -Warning: Permanently added 'pcmk-2,192.168.122.102' (ECDSA) to the list of known hosts. -root@pcmk-2's password: -id_dsa -id_dsa.pub -authorized_keys -known_hosts ----- - -Test that you can now run commands remotely, without being prompted: ----- -[root@pcmk-1 ~]# ssh pcmk-2 -- uname -n -pcmk-2 ----- diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt b/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt deleted file mode 100644 index 60ca19e900..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt +++ /dev/null @@ -1,28 +0,0 @@ -:compat-mode: legacy -= Read-Me-First = - -== The Scope of this Document == - -Computer clusters can be used to provide highly available services or -resources. The redundancy of multiple machines is used to guard -against failures of many types. - -This document will walk through the installation and setup of simple -clusters using the &DISTRO; distribution, version &DISTRO_VERSION;. - -The clusters described here will use Pacemaker and Corosync to provide -resource management and messaging. Required packages and modifications -to their configuration files are described along with the use of the -Pacemaker command line tool for generating the XML used for cluster -control. - -Pacemaker is a central component and provides the resource management -required in these systems. This management includes detecting and -recovering from the failure of various nodes, resources and services -under its control. 
- -When more in-depth information is required, and for real-world usage, -please refer to the -https://www.clusterlabs.org/pacemaker/doc/[Pacemaker Explained] manual. - -include::../../shared/en-US/pacemaker-intro.txt[] diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt deleted file mode 100644 index f9287da3cf..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt +++ /dev/null @@ -1,614 +0,0 @@ -:compat-mode: legacy -= Replicate Storage Using DRBD = - -Even if you're serving up static websites, having to manually synchronize -the contents of that website to all the machines in the cluster is not -ideal. For dynamic websites, such as a wiki, it's not even an option. Not -everyone care afford network-attached storage, but somehow the data needs -to be kept in sync. - -Enter DRBD, which can be thought of as network-based RAID-1. -footnote:[See http://www.drbd.org/ for details.] - -== Install the DRBD Packages == - -DRBD itself is included in the upstream kernel,footnote:[Since version 2.6.33] -but we do need some utilities to use it effectively. - -CentOS does not ship these utilities, so we need to enable a third-party -repository to get them. Supported packages for many OSes are available from -DRBD's maker http://www.linbit.com/[LINBIT], but here we'll use the free -http://elrepo.org/[ELRepo] repository. - -On both nodes, import the ELRepo package signing key, and enable the -repository: ----- -# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org -# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm -Retrieving http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm -Preparing... ################################# [100%] -Updating / installing... 
- 1:elrepo-release-7.0-3.el7.elrepo ################################# [100%] ----- - -Now, we can install the DRBD kernel module and utilities: ----- -# yum install -y kmod-drbd84 drbd84-utils ----- - -DRBD will not be able to run under the default SELinux security policies. -If you are familiar with SELinux, you can modify the policies in a more -fine-grained manner, but here we will simply exempt DRBD processes from SELinux -control: ----- -# semanage permissive -a drbd_t ----- - -We will configure DRBD to use port 7789, so allow that port from each host to -the other: ----- -[root@pcmk-1 ~]# firewall-cmd --permanent --add-rich-rule='rule family="ipv4" \ - source address="192.168.122.102" port port="7789" protocol="tcp" accept' -success -[root@pcmk-1 ~]# firewall-cmd --reload -success ----- ----- -[root@pcmk-2 ~]# firewall-cmd --permanent --add-rich-rule='rule family="ipv4" \ - source address="192.168.122.101" port port="7789" protocol="tcp" accept' -success -[root@pcmk-2 ~]# firewall-cmd --reload -success ----- - -[NOTE] -====== -In this example, we have only two nodes, and all network traffic is on the same LAN. -In production, it is recommended to use a dedicated, isolated network for cluster-related traffic, -so the firewall configuration would likely be different; one approach would be to -add the dedicated network interfaces to the trusted zone. -====== - -== Allocate a Disk Volume for DRBD == - -DRBD will need its own block device on each node. This can be -a physical disk partition or logical volume, of whatever size -you need for your data. For this document, we will use a 512MiB logical volume, -which is more than sufficient for a single HTML file and (later) GFS2 metadata. - ----- -[root@pcmk-1 ~]# vgdisplay | grep -e Name -e Free - VG Name centos_pcmk-1 - Free PE / Size 255 / 1020.00 MiB -[root@pcmk-1 ~]# lvcreate --name drbd-demo --size 512M centos_pcmk-1 - Logical volume "drbd-demo" created. 
-[root@pcmk-1 ~]# lvs - LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert - drbd-demo centos_pcmk-1 -wi-a----- 512.00m - root centos_pcmk-1 -wi-ao---- 3.00g - swap centos_pcmk-1 -wi-ao---- 1.00g ----- - -Repeat for the second node, making sure to use the same size: - ----- -[root@pcmk-1 ~]# ssh pcmk-2 -- lvcreate --name drbd-demo --size 512M centos_pcmk-2 - Logical volume "drbd-demo" created. ----- - -== Configure DRBD == - -There is no series of commands for building a DRBD configuration, so simply -run this on both nodes to use this sample configuration: - ----- -# cat </etc/drbd.d/wwwdata.res -resource wwwdata { - protocol C; - meta-disk internal; - device /dev/drbd1; - syncer { - verify-alg sha1; - } - net { - allow-two-primaries; - } - on pcmk-1 { - disk /dev/centos_pcmk-1/drbd-demo; - address 192.168.122.101:7789; - } - on pcmk-2 { - disk /dev/centos_pcmk-2/drbd-demo; - address 192.168.122.102:7789; - } -} -END ----- - -[IMPORTANT] -========= -Edit the file to use the hostnames, IP addresses and logical volume paths -of your nodes if they differ from the ones used in this guide. -========= - -[NOTE] -======= -Detailed information on the directives used in this configuration (and -other alternatives) is available in the -https://docs.linbit.com/docs/users-guide-8.4/#ch-configure[DRBD User's Guide]. -The *allow-two-primaries* option would not normally be used in -an active/passive cluster. We are adding it here for the convenience -of changing to an active/active cluster later. -======= - -== Initialize DRBD == - -With the configuration in place, we can now get DRBD running. - -These commands create the local metadata for the DRBD resource, -ensure the DRBD kernel module is loaded, and bring up the DRBD resource. 
-Run them on one node: - ----- -[root@pcmk-1 ~]# drbdadm create-md wwwdata - - - - - - - - - - - - - - - - - - - - --== Thank you for participating in the global usage survey ==-- -The server's response is: - -you are the 2147th user to install this version -initializing activity log -initializing bitmap (16 KB) to all zero -Writing meta data... -New drbd meta data block successfully created. -success -[root@pcmk-1 ~]# modprobe drbd -[root@pcmk-1 ~]# drbdadm up wwwdata - - - - - - - - - - - - - - - - - - - --== Thank you for participating in the global usage survey ==-- -The server's response is: - ----- - -We can confirm DRBD's status on this node: - ----- -[root@pcmk-1 ~]# cat /proc/drbd -version: 8.4.11-1 (api:1/proto:86-101) -GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-04-26 12:10:42 - - 1: cs:WFConnection ro:Secondary/Unknown ds:Inconsistent/DUnknown C r----s - ns:0 nr:0 dw:0 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:524236 ----- - -Because we have not yet initialized the data, this node's data -is marked as *Inconsistent*. Because we have not yet initialized -the second node, the local state is *WFConnection* (waiting for connection), -and the partner node's status is marked as *Unknown*. - -Now, repeat the above commands on the second node, starting with creating -wwwdata.res. After giving it time to connect, when we check the status, it -shows: - ----- -[root@pcmk-2 ~]# cat /proc/drbd -version: 8.4.11-1 (api:1/proto:86-101) -GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-04-26 12:10:42 - - 1: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r----- - ns:0 nr:0 dw:0 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:524236 ----- - -You can see the state has changed to *Connected*, meaning the two DRBD nodes -are communicating properly, and both nodes are in *Secondary* role -with *Inconsistent* data. 
- -To make the data consistent, we need to tell DRBD which node should be -considered to have the correct data. In this case, since we are creating -a new resource, both have garbage, so we'll just pick pcmk-1 -and run this command on it: - ----- -[root@pcmk-1 ~]# drbdadm primary --force wwwdata ----- - -[NOTE] -====== -If you are using a different version of DRBD, the required syntax may be different. -See the documentation for your version for how to perform these commands. -====== - -If we check the status immediately, we'll see something like this: ----- -[root@pcmk-1 ~]# cat /proc/drbd -version: 8.4.11-1 (api:1/proto:86-101) -GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-04-26 12:10:42 - - 1: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r----- - ns:43184 nr:0 dw:0 dr:45312 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:481052 - [>...................] sync'ed: 8.6% (481052/524236)K - finish: 0:01:51 speed: 4,316 (4,316) K/sec ----- - -We can see that this node has the *Primary* role, the partner node has -the *Secondary* role, this node's data is now considered *UpToDate*, -the partner node's data is still *Inconsistent*, and a progress bar -shows how far along the partner node is in synchronizing the data. - -After a while, the sync should finish, and you'll see something like: ----- -[root@pcmk-1 ~]# cat /proc/drbd -version: 8.4.11-1 (api:1/proto:86-101) -GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-04-26 12:10:42 - - 1: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----- - ns:524236 nr:0 dw:0 dr:526364 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0 ----- - -Both sets of data are now *UpToDate*, and we can proceed to creating -and populating a filesystem for our WebSite resource's documents. 
- -== Populate the DRBD Disk == - -On the node with the primary role (pcmk-1 in this example), -create a filesystem on the DRBD device: - ----- -[root@pcmk-1 ~]# mkfs.xfs /dev/drbd1 -meta-data=/dev/drbd1 isize=512 agcount=4, agsize=32765 blks - = sectsz=512 attr=2, projid32bit=1 - = crc=1 finobt=0, sparse=0 -data = bsize=4096 blocks=131059, imaxpct=25 - = sunit=0 swidth=0 blks -naming =version 2 bsize=4096 ascii-ci=0 ftype=1 -log =internal log bsize=4096 blocks=855, version=2 - = sectsz=512 sunit=0 blks, lazy-count=1 -realtime =none extsz=4096 blocks=0, rtextents=0 ----- - -[NOTE] -==== -In this example, we create an xfs filesystem with no special options. -In a production environment, you should choose a filesystem type and -options that are suitable for your application. -==== - -Mount the newly created filesystem, populate it with our web document, -give it the same SELinux policy as the web document root, -then unmount it (the cluster will handle mounting and unmounting it later): - ----- -[root@pcmk-1 ~]# mount /dev/drbd1 /mnt -[root@pcmk-1 ~]# cat <<-END >/mnt/index.html - - My Test Site - DRBD - -END -[root@pcmk-1 ~]# chcon -R --reference=/var/www/html /mnt -[root@pcmk-1 ~]# umount /dev/drbd1 ----- - -== Configure the Cluster for the DRBD device == - -One handy feature `pcs` has is the ability to queue up several changes -into a file and commit those changes all at once. To do this, start by -populating the file with the current raw XML config from the CIB. - ----- -[root@pcmk-1 ~]# pcs cluster cib drbd_cfg ----- - -Using pcs's `-f` option, make changes to the configuration saved -in the +drbd_cfg+ file. These changes will not be seen by the cluster until -the +drbd_cfg+ file is pushed into the live cluster's CIB later. - -Here, we create a cluster resource for the DRBD device, and an additional _clone_ -resource to allow the resource to run on both nodes at the same time. 
- ----- -[root@pcmk-1 ~]# pcs -f drbd_cfg resource create WebData ocf:linbit:drbd \ - drbd_resource=wwwdata op monitor interval=60s -[root@pcmk-1 ~]# pcs -f drbd_cfg resource master WebDataClone WebData \ - master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 \ - notify=true -[root@pcmk-1 ~]# pcs -f drbd_cfg resource show - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Stopped: [ pcmk-1 pcmk-2 ] ----- - -.Note -[NOTE] -==== -In Fedora 29 and CentOS 8.0, 'master' resources have been renamed to -'promotable clone' resources and the `pcs` command has been changed -accordingly: ----- -[root@pcmk-1 ~]# pcs -f drbd_cfg resource promotable WebData \ - promoted-max=1 promoted-node-max=1 clone-max=2 clone-node-max=1 \ - notify=true ----- -The new command does not allow to set a custom name for the resulting -promotable resource. `Pcs` automatically creates a name for the resource in -the form of *pass:[resource_name]-clone*, that is -*WebData-clone* in this case. - -To avoid confusion whether the +pcs resource show+ command displays resources' -status or configuration, the command has been deprecated in Fedora 29 and -CentOS 8.0. Two new commands have been introduced for displaying resources' -status and configuration: `pcs resource status` and `pcs resource config`, -respectively. -==== - -After you are satisfied with all the changes, you can commit -them all at once by pushing the drbd_cfg file into the live CIB. 
- ----- -[root@pcmk-1 ~]# pcs cluster cib-push drbd_cfg --config -CIB updated ----- - -Let's see what the cluster did with the new configuration: ----- -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 17:58:07 2018 -Last change: Mon Sep 10 17:57:53 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -4 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 ] - Slaves: [ pcmk-2 ] - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -We can see that *WebDataClone* (our DRBD device) is running as master (DRBD's -primary role) on *pcmk-1* and slave (DRBD's secondary role) on *pcmk-2*. - -[IMPORTANT] -==== -The resource agent should load the DRBD module when needed if it's not already -loaded. If that does not happen, configure your operating system to load the -module at boot time. For &DISTRO; &DISTRO_VERSION;, you would run this on both -nodes: ----- -# echo drbd >/etc/modules-load.d/drbd.conf ----- -==== - -== Configure the Cluster for the Filesystem == - -Now that we have a working DRBD device, we need to mount its filesystem. - -In addition to defining the filesystem, we also need to -tell the cluster where it can be located (only on the DRBD Primary) -and when it is allowed to start (after the Primary was promoted). - -We are going to take a shortcut when creating the resource this time. -Instead of explicitly saying we want the *ocf:heartbeat:Filesystem* script, we -are only going to ask for *Filesystem*. 
We can do this because we know there is only -one resource script named *Filesystem* available to pacemaker, and that pcs is smart -enough to fill in the *ocf:heartbeat:* portion for us correctly in the configuration. -If there were multiple *Filesystem* scripts from different OCF providers, we would need -to specify the exact one we wanted. - -Once again, we will queue our changes to a file and then push the -new configuration to the cluster as the final step. - ----- -[root@pcmk-1 ~]# pcs cluster cib fs_cfg -[root@pcmk-1 ~]# pcs -f fs_cfg resource create WebFS Filesystem \ - device="/dev/drbd1" directory="/var/www/html" fstype="xfs" -Assumed agent name 'ocf:heartbeat:Filesystem' (deduced from 'Filesystem') -[root@pcmk-1 ~]# pcs -f fs_cfg constraint colocation add \ - WebFS with WebDataClone INFINITY with-rsc-role=Master -[root@pcmk-1 ~]# pcs -f fs_cfg constraint order \ - promote WebDataClone then start WebFS -Adding WebDataClone WebFS (kind: Mandatory) (Options: first-action=promote then-action=start) ----- - -We also need to tell the cluster that Apache needs to run on the same -machine as the filesystem and that it must be active before Apache can -start. - ----- -[root@pcmk-1 ~]# pcs -f fs_cfg constraint colocation add WebSite with WebFS INFINITY -[root@pcmk-1 ~]# pcs -f fs_cfg constraint order WebFS then WebSite -Adding WebFS WebSite (kind: Mandatory) (Options: first-action=start then-action=start) ----- - -Review the updated configuration. 
- ----- -[root@pcmk-1 ~]# pcs -f fs_cfg constraint -Location Constraints: - Resource: WebSite - Enabled on: pcmk-1 (score:50) -Ordering Constraints: - start ClusterIP then start WebSite (kind:Mandatory) - promote WebDataClone then start WebFS (kind:Mandatory) - start WebFS then start WebSite (kind:Mandatory) -Colocation Constraints: - WebSite with ClusterIP (score:INFINITY) - WebFS with WebDataClone (score:INFINITY) (with-rsc-role:Master) - WebSite with WebFS (score:INFINITY) -Ticket Constraints: -[root@pcmk-1 ~]# pcs -f fs_cfg resource show - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 ] - Slaves: [ pcmk-2 ] - WebFS (ocf::heartbeat:Filesystem): Stopped ----- - -After reviewing the new configuration, upload it and watch the -cluster put it into effect. - ----- -[root@pcmk-1 ~]# pcs cluster cib-push fs_cfg --config -CIB updated -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 18:02:24 2018 -Last change: Mon Sep 10 18:02:14 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -5 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 - WebSite (ocf::heartbeat:apache): Started pcmk-1 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-1 ] - Slaves: [ pcmk-2 ] - WebFS (ocf::heartbeat:Filesystem): Started pcmk-1 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -== Test Cluster Failover == - -Previously, we used `pcs cluster stop pcmk-1` to stop all cluster -services on *pcmk-1*, failing over the cluster resources, but there is another -way to safely simulate node failure. - -We can put the node into _standby mode_. 
Nodes in this state continue to -run corosync and pacemaker but are not allowed to run resources. Any resources -found active there will be moved elsewhere. This feature can be particularly -useful when performing system administration tasks such as updating packages -used by cluster resources. - -Put the active node into standby mode, and observe the cluster move all -the resources to the other node. The node's status will change to indicate that -it can no longer host resources, and eventually all the resources will move. - ----- -[root@pcmk-1 ~]# pcs cluster standby pcmk-1 -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 18:04:22 2018 -Last change: Mon Sep 10 18:03:43 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -5 resources configured - -Node pcmk-1: standby -Online: [ pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - WebSite (ocf::heartbeat:apache): Started pcmk-2 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-2 ] - Stopped: [ pcmk-1 ] - WebFS (ocf::heartbeat:Filesystem): Started pcmk-2 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -Once we've done everything we needed to on pcmk-1 (in this case nothing, -we just wanted to see the resources move), we can allow the node to be a -full cluster member again. 
- ----- -[root@pcmk-1 ~]# pcs cluster unstandby pcmk-1 -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 18:05:22 2018 -Last change: Mon Sep 10 18:05:21 2018 by root via cibadmin on pcmk-1 - -2 nodes configured -5 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -Full list of resources: - - ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 - WebSite (ocf::heartbeat:apache): Started pcmk-2 - Master/Slave Set: WebDataClone [WebData] - Masters: [ pcmk-2 ] - Slaves: [ pcmk-1 ] - WebFS (ocf::heartbeat:Filesystem): Started pcmk-2 - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -Notice that *pcmk-1* is back to the *Online* state, and that the cluster resources -stay where they are due to our resource stickiness settings configured earlier. - -.Note -[NOTE] -==== -Since Fedora 29 and CentOS 8.0, the commands for controlling standby mode are -`pcs node standby` and `pcs node unstandby`. -==== diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt deleted file mode 100644 index e21688dbc0..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt +++ /dev/null @@ -1,210 +0,0 @@ -:compat-mode: legacy -= Start and Verify Cluster = - -== Start the Cluster == - -Now that corosync is configured, it is time to start the cluster. -The command below will start corosync and pacemaker on both nodes -in the cluster. If you are issuing the start command from a different -node than the one you ran the `pcs cluster auth` command on earlier, you -must authenticate on the current node you are logged into before you will -be allowed to start the cluster. - ----- -[root@pcmk-1 ~]# pcs cluster start --all -pcmk-1: Starting Cluster... -pcmk-2: Starting Cluster... 
----- - -[NOTE] -====== -An alternative to using the `pcs cluster start --all` command -is to issue either of the below command sequences on each node in the -cluster separately: - ----- -# pcs cluster start -Starting Cluster... ----- - -or - ----- -# systemctl start corosync.service -# systemctl start pacemaker.service ----- -====== - -[IMPORTANT] -==== -In this example, we are not enabling the corosync and pacemaker services -to start at boot. If a cluster node fails or is rebooted, you will need to run -+pcs cluster start pass:[nodename]+ (or `--all`) to start the cluster on it. -While you could enable the services to start at boot, requiring a manual -start of cluster services gives you the opportunity to do a post-mortem investigation -of a node failure before returning it to the cluster. -==== - -== Verify Corosync Installation == - -First, use `corosync-cfgtool` to check whether cluster communication is happy: - ----- -[root@pcmk-1 ~]# corosync-cfgtool -s -Printing ring status. -Local node ID 1 -RING ID 0 - id = 192.168.122.101 - status = ring 0 active with no faults ----- - -We can see here that everything appears normal with our fixed IP -address (not a 127.0.0.x loopback address) listed as the *id*, and *no -faults* for the status. - -If you see something different, you might want to start by checking -the node's network, firewall and SELinux configurations. 
- -Next, check the membership and quorum APIs: - ----- -[root@pcmk-1 ~]# corosync-cmapctl | grep members -runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0 -runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.122.101) -runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1 -runtime.totem.pg.mrp.srp.members.1.status (str) = joined -runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0 -runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.122.102) -runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 1 -runtime.totem.pg.mrp.srp.members.2.status (str) = joined - -[root@pcmk-1 ~]# pcs status corosync - -Membership information -\---------------------- - Nodeid Votes Name - 1 1 pcmk-1 (local) - 2 1 pcmk-2 ----- - -You should see both nodes have joined the cluster. - -== Verify Pacemaker Installation == - -Now that we have confirmed that Corosync is functional, we can check -the rest of the stack. Pacemaker has already been started, so verify -the necessary processes are running: - ----- -[root@pcmk-1 ~]# ps axf - PID TTY STAT TIME COMMAND - 2 ? S 0:00 [kthreadd] -...lots of processes... -11635 ? SLsl 0:03 corosync -11642 ? Ss 0:00 /usr/sbin/pacemakerd -f -11643 ? Ss 0:00 \_ /usr/libexec/pacemaker/cib -11644 ? Ss 0:00 \_ /usr/libexec/pacemaker/stonithd -11645 ? Ss 0:00 \_ /usr/libexec/pacemaker/lrmd -11646 ? Ss 0:00 \_ /usr/libexec/pacemaker/attrd -11647 ? Ss 0:00 \_ /usr/libexec/pacemaker/pengine -11648 ? 
Ss 0:00 \_ /usr/libexec/pacemaker/crmd ----- - -If that looks OK, check the `pcs status` output: - ----- -[root@pcmk-1 ~]# pcs status -Cluster name: mycluster -WARNING: no stonith devices and stonith-enabled is not false -Stack: corosync -Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum -Last updated: Mon Sep 10 16:37:34 2018 -Last change: Mon Sep 10 16:30:53 2018 by hacluster via crmd on pcmk-2 - -2 nodes configured -0 resources configured - -Online: [ pcmk-1 pcmk-2 ] - -No resources - - -Daemon Status: - corosync: active/disabled - pacemaker: active/disabled - pcsd: active/enabled ----- - -Finally, ensure there are no start-up errors from corosync or pacemaker (aside -from messages relating to not having STONITH configured, which are OK at this -point): ----- -[root@pcmk-1 ~]# journalctl -b | grep -i error ----- - -[NOTE] -====== -Other operating systems may report startup errors in other locations, -for example +/var/log/messages+. -====== - -Repeat these checks on the other node. The results should be the same. - -== Explore the Existing Configuration == - -For those who are not of afraid of XML, you can see the raw cluster -configuration and status by using the `pcs cluster cib` command. - -.The last XML you'll see in this document -====== ----- -[root@pcmk-1 ~]# pcs cluster cib ----- -[source,XML] ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ----- -====== - -Before we make any changes, it's a good idea to check the validity of -the configuration. - ----- -[root@pcmk-1 ~]# crm_verify -L -V - error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined - error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option - error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -Errors found during check: config not valid ----- - -As you can see, the tool has found some errors. 
The cluster will not start any -resources until we configure STONITH. diff --git a/doc/Clusters_from_Scratch/en-US/Clusters_from_Scratch.ent b/doc/Clusters_from_Scratch/en-US/Clusters_from_Scratch.ent deleted file mode 100644 index ce8d7d1838..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Clusters_from_Scratch.ent +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/doc/Clusters_from_Scratch/en-US/Clusters_from_Scratch.xml b/doc/Clusters_from_Scratch/en-US/Clusters_from_Scratch.xml deleted file mode 100644 index d69f167134..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Clusters_from_Scratch.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - - - - - - - - - - - - - - diff --git a/doc/Clusters_from_Scratch/en-US/Preface.xml b/doc/Clusters_from_Scratch/en-US/Preface.xml deleted file mode 100644 index 678b160af3..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Preface.xml +++ /dev/null @@ -1,13 +0,0 @@ - - -%BOOK_ENTITIES; -]> -Preface - - - - - - - diff --git a/doc/Clusters_from_Scratch/en-US/Revision_History.xml b/doc/Clusters_from_Scratch/en-US/Revision_History.xml deleted file mode 100644 index 8a61d586e7..0000000000 --- a/doc/Clusters_from_Scratch/en-US/Revision_History.xml +++ /dev/null @@ -1,219 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Revision History - - - - 1-0 - Mon May 17 2010 - - AndrewBeekhof - andrew@beekhof.net - - - - Import from Pages.app - - - - - 2-0 - Wed Sep 22 2010 - - RaoulScarazzini - rasca@miamammausalinux.org - - - - Italian translation - - - - - 3-0 - Wed Feb 9 2011 - - Andrew - Beekhof - andrew@beekhof.net - - - - Updated for Fedora 13 - - - - - 4-0 - Wed Oct 5 2011 - - Andrew - Beekhof - andrew@beekhof.net - - - - Update the GFS2 section to use CMAN - - - - - 5-0 - Fri Feb 10 2012 - - Andrew - Beekhof - andrew@beekhof.net - - - - Generate docbook content from asciidoc sources - - - - - 6-0 - Tues July 3 2012 - - AndrewBeekhof - andrew@beekhof.net - - - - Updated for Fedora 17 - - - - - 7-0 - Fri Sept 14 2012 - - 
DavidVossel - davidvossel@gmail.com - - - - Updated for pcs - - - - - 8-0 - Mon Jan 05 2015 - - KenGaillot - kgaillot@redhat.com - - - - Updated for Fedora 21 - - - - 8-1 - Thu Jan 08 2015 - - KenGaillot - kgaillot@redhat.com - - - - Minor corrections, plus use include file for intro - - - - - 9-0 - Fri Aug 14 2015 - - KenGaillot - kgaillot@redhat.com - - - - Update for CentOS 7.1 and leaving firewalld/SELinux enabled - - - - - 10-0 - Fri Jan 12 2018 - - KenGaillot - kgaillot@redhat.com - - - - Update banner for Pacemaker 2.0 and content for CentOS 7.4 with Pacemaker 1.1.16 - - - - - 10-1 - Wed Sep 5 2018 - - KenGaillot - kgaillot@redhat.com - - - - Update for CentOS 7.5 with Pacemaker 1.1.18 - - - - - 10-2 - Fri Dec 7 2018 - - KenGaillot - kgaillot@redhat.com - - - JanPokorný - jpokorny@redhat.com - - - ChrisLumens - clumens@redhat.com - - - - Minor clarifications and formatting changes - - - - - 11-0 - Thu Jul 18 2019 - - TomasJelinek - tojeline@redhat.com - - - - Note differences in pcs 0.10 - - - - - 11-1 - Thu Nov 21 2019 - - KenGaillot - kgaillot@redhat.com - - - - Remove references to obsolete cloned IP usage, and - reorganize chapters a bit - - - - - - diff --git a/doc/Clusters_from_Scratch/en-US/images b/doc/Clusters_from_Scratch/en-US/images deleted file mode 120000 index 963300dd95..0000000000 --- a/doc/Clusters_from_Scratch/en-US/images +++ /dev/null @@ -1 +0,0 @@ -../../shared/en-US/images \ No newline at end of file diff --git a/doc/Clusters_from_Scratch/publican.cfg.in b/doc/Clusters_from_Scratch/publican.cfg.in deleted file mode 100644 index 816267d7e0..0000000000 --- a/doc/Clusters_from_Scratch/publican.cfg.in +++ /dev/null @@ -1,14 +0,0 @@ -# Config::Simple 4.59 -# Fri Apr 23 15:33:52 2010 - -docname: Clusters_from_Scratch -xml_lang: en-US -#edition: 1 -type: Book -version: @PACKAGE_SERIES@ -brand: @PUBLICAN_BRAND@ -product: Pacemaker - -chunk_first: 0 -chunk_section_depth: 3 -generate_section_toc_level: 4 diff --git a/doc/Doxyfile.in 
b/doc/Doxyfile.in index bc449dfb76..7f9720e616 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -1,2280 +1,2280 @@ # Doxyfile 1.8.5 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv # built into libc) for the transcoding. See http://www.gnu.org/software/libiconv # for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = @PACKAGE_NAME@ # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. 
PROJECT_NUMBER = @PACKAGE_VERSION@-@BUILD_VERSION@ # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "Scalable High-Availability cluster resource manager" # With the PROJECT_LOGO tag one can specify an logo or icon that is included in # the documentation. The maximum height of the logo should not exceed 55 pixels # and the maximum width should not exceed 200 pixels. Doxygen will copy the logo # to the output directory. -PROJECT_LOGO = publican-clusterlabs/en-US/images/title_logo.png +#PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = api/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese- # Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi, # Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en, # Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish, # Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, # Turkish, Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. 
ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = .. # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = .. # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. 
SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a # new page for each member. If set to NO, the documentation of a member will be # part of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. 
TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. 
Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibility issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word # or globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match function declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. 
BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = YES # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). 
# # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. 
LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined # locally in source files will be included in the documentation. If set to NO # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO only methods in the interface are # included. # The default value is: NO. 
EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO these classes will be included in the various overviews. This option has # no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. 
If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO the members will appear in declaration order. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. 
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = YES # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the # todo list. This list is created by putting \todo commands in the # documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the # test list. This list is created by putting \test commands in the # documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug # list. 
This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES the list # will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. 
SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. Do not use file names with spaces, bibtex cannot handle them. See # also \cite for info how to create references. 
CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO doxygen will only warn about wrong or incomplete parameter # documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. 
Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. # Note: If this tag is empty the current directory is searched. INPUT = ../include ../lib # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: http://www.gnu.org/software/libiconv) for the list of # possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank the # following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, # *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, # *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, # *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, # *.qsf, *.as and *.js. 
FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = . # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. 
If left blank all # files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = YES # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER ) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. 
FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # function all documented functions referencing it will be listed. # The default value is: NO. 
REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES, then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see http://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the config file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. 
USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in # which the alphabetical index list will be split. # Minimum value: 1, maximum value: 20, default value: 5. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. 
If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- # defined cascading style sheet that is included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet file to the output directory. For an example # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the stylesheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. 
# Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to NO can help when comparing the output of multiple runs. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. 
Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: http://developer.apple.com/tools/xcode/), introduced with # OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. 
DOCSET_BUNDLE_ID = org.doxygen.Pacemaker # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.ClusterLabs # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = ClusterLabs # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler ( hhc.exe). 
If non-empty # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated ( # YES) or that it should be included in the master .chm file ( NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated ( # YES) or a normal table of contents ( NO) in the .chm file. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. 
For more information please see Qt Help Project / Namespace # (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location of Qt's # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the # generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. 
QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). 
For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. 
# Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering # instead of using prerendered bitmaps. Use this if you do not have LaTeX # installed or if you want the formulas to look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. 
The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from http://www.mathjax.org before deployment. # The default value is: http://cdn.mathjax.org/mathjax/latest. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use + S # (what the is depends on the OS and browser, but it is typically # , /