diff --git a/.gitignore b/.gitignore index 06df650a98..197d71dca9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,335 +1,336 @@ # # Copyright 2011-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # # Common conventions for files that should be ignored *~ *.bz2 *.diff *.orig *.patch *.rej *.sed *.swp *.tar.gz *.tgz \#* .\#* logs # libtool artifacts *.la *.lo .libs libltdl libtool libtool.m4 ltdl.m4 /m4/argz.m4 /m4/ltargz.m4 /m4/ltoptions.m4 /m4/ltsugar.m4 /m4/ltversion.m4 /m4/lt~obsolete.m4 # autotools artifacts .deps .dirstamp Makefile Makefile.in aclocal.m4 autoconf autoheader autom4te.cache/ automake /confdefs.h config.log config.status configure /conftest* # gettext artifacts /ABOUT-NLS /m4/codeset.m4 /m4/fcntl-o.m4 /m4/gettext.m4 /m4/glibc2.m4 /m4/glibc21.m4 /m4/iconv.m4 /m4/intdiv0.m4 /m4/intl.m4 /m4/intldir.m4 /m4/intlmacosx.m4 /m4/intmax.m4 /m4/inttypes-pri.m4 /m4/inttypes_h.m4 /m4/lcmessage.m4 /m4/lib-ld.m4 /m4/lib-link.m4 /m4/lib-prefix.m4 /m4/lock.m4 /m4/longlong.m4 /m4/nls.m4 /m4/po.m4 /m4/printf-posix.m4 /m4/progtest.m4 /m4/size_max.m4 /m4/stdint_h.m4 /m4/threadlib.m4 /m4/uintmax_t.m4 /m4/visibility.m4 /m4/wchar_t.m4 /m4/wint_t.m4 /m4/xsize.m4 /po/*.gmo /po/*.header /po/*.pot /po/*.sin /po/Makefile.in.in /po/Makevars.template /po/POTFILES /po/Rules-quot /po/stamp-po # configure targets /cts/benchmark/clubench +/cts/cts-attrd /cts/cts-cli /cts/cts-coverage /cts/cts-exec /cts/cts-fencing /cts/cts-regression /cts/cts-scheduler /cts/lab/CTS.py /cts/lab/CTSlab.py /cts/lab/OCFIPraTest.py /cts/lab/cluster_test /cts/lab/cts /cts/lab/cts-log-watcher /cts/lxc_autogen.sh /cts/support/LSBDummy /cts/support/cts-support /cts/support/fence_dummy /cts/support/pacemaker-cts-dummyd /cts/support/pacemaker-cts-dummyd@.service /daemons/execd/pacemaker_remote /daemons/execd/pacemaker_remote.service /daemons/fenced/fence_legacy /daemons/fenced/fence_watchdog /daemons/pacemakerd/pacemaker.combined.upstart /daemons/pacemakerd/pacemaker.service /daemons/pacemakerd/pacemaker.upstart /doc/Doxyfile /etc/init.d/pacemaker /etc/logrotate.d/pacemaker /extra/resources/ClusterMon /extra/resources/HealthSMART /extra/resources/SysInfo /extra/resources/controld /extra/resources/ifspeed /extra/resources/o2cb /include/config.h /include/config.h.in /include/crm_config.h /maint/bumplibs /python/pacemaker/buildoptions.py /python/setup.py /tools/cibsecret /tools/crm_error /tools/crm_failcount /tools/crm_master /tools/crm_mon.service /tools/crm_mon.upstart /tools/crm_report /tools/crm_rule /tools/crm_standby /tools/pcmk_simtimes /tools/report.collector /tools/report.common # Compiled targets and intermediary files *.o *.pc *.pyc /daemons/attrd/pacemaker-attrd /daemons/based/pacemaker-based /daemons/controld/pacemaker-controld /daemons/execd/cts-exec-helper /daemons/execd/pacemaker-execd /daemons/execd/pacemaker-remoted /daemons/fenced/cts-fence-helper /daemons/fenced/pacemaker-fenced /daemons/pacemakerd/pacemakerd /daemons/schedulerd/pacemaker-schedulerd /devel/scratch /lib/gnu/stdalign.h /tools/attrd_updater /tools/cibadmin /tools/crmadmin /tools/crm_attribute /tools/crm_diff /tools/crm_mon /tools/crm_node /tools/crm_resource /tools/crm_shadow /tools/crm_simulate /tools/crm_ticket /tools/crm_verify /tools/iso8601 /tools/stonith_admin # Generated XML schema files /xml/crm_mon.rng /xml/pacemaker*.rng /xml/versions.rng /xml/api/api-result*.rng 
# Working directories for make dist and make export /pacemaker-[a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9] # Documentation build targets and intermediary files *.7 *.7.xml *.7.html *.8 *.8.xml *.8.html GPATH GRTAGS GTAGS TAGS /daemons/fenced/pacemaker-fenced.xml /daemons/schedulerd/pacemaker-schedulerd.xml /doc/.ABI-build /doc/HTML /doc/abi_dumps /doc/abi-check /doc/api/ /doc/compat_reports /doc/crm_fencing.html /doc/sphinx/*/_build /doc/sphinx/*/conf.py /doc/sphinx/*/generated /doc/sphinx/build-2.1.txt /doc/sphinx/shared/images/*.png # Test artifacts (from unit tests, regression tests, static analysis, etc.) *.coverity *.gcda *.gcno coverity-* pacemaker_*.info /coverage /cppcheck.out /cts/scheduler/*.ref /cts/scheduler/*.up /cts/scheduler/*.up.err /cts/scheduler/bug-rh-1097457.log /cts/scheduler/bug-rh-1097457.trs /cts/scheduler/shadow.* /cts/test-suite.log /lib/*/tests/*/*.log /lib/*/tests/*/*_test /lib/*/tests/*/*.trs /xml/test-*/*.up /xml/test-*/*.up.err /xml/assets/*.rng /xml/assets/diffview.js /xml/assets/xmlcatalog /test/_test_file.c # Packaging artifacts *.rpm /pacemaker.spec /rpm/[A-LN-Z]* /rpm/build.counter /rpm/mock # Project maintainer artifacts /maint/gnulib /maint/mocked/based /maint/testcc_helper.cc /maint/testcc_*_h # Formerly built files (helps when jumping back and forth in checkout) /.ABI-build /Doxyfile /HTML /abi_dumps /abi-check /build.counter /compat_reports /compile /cts/.regression.failed.diff /attrd /cib /config.guess /config.sub /coverage.sh /crmd /cts/CTS.py /cts/CTSlab.py /cts/CTSvars.py /cts/HBDummy /cts/LSBDummy /cts/OCFIPraTest.py /cts/cluster_test /cts/cts /cts/cts-log-watcher /cts/cts-support /cts/fence_dummy /cts/lab/CTSvars.py /cts/pacemaker-cts-dummyd /cts/pacemaker-cts-dummyd@.service /daemons/based/cibmon /daemons/pacemakerd/pacemaker /depcomp /doc/*.build /doc/*/en-US/Ap-*.xml /doc/*/en-US/Ch-*.xml /doc/*/publican.cfg /doc/*/publish /doc/*/tmp/** /doc/Clusters_from_Scratch.txt /doc/Pacemaker_Explained.txt /doc/acls.html /doc/publican-catalog* /doc/shared/en-US/*.xml /doc/shared/en-US/images/pcmk-*.png /doc/shared/en-US/images/Policy-Engine-*.png /extra/logrotate/pacemaker /fencing /include/stamp-* /install-sh /lib/common/md5.c /lib/common/tests/flags/pcmk__clear_flags_as /lib/common/tests/flags/pcmk__set_flags_as /lib/common/tests/flags/pcmk_all_flags_set /lib/common/tests/flags/pcmk_any_flags_set /lib/common/tests/operations/parse_op_key /lib/common/tests/strings/pcmk__btoa /lib/common/tests/strings/pcmk__parse_ll_range /lib/common/tests/strings/pcmk__scan_double /lib/common/tests/strings/pcmk__str_any_of /lib/common/tests/strings/pcmk__strcmp /lib/common/tests/strings/pcmk__char_in_any_str /lib/common/tests/utils/pcmk_str_is_infinity /lib/common/tests/utils/pcmk_str_is_minus_infinity /lib/gnu/libgnu.a /lib/pengine/tests/rules/pe_cron_range_satisfied /lrmd /ltmain.sh /mcp /missing /mock /pacemaker-*.spec /pengine /py-compile /scratch /test-driver /xml/crm.dtd ylwrap diff --git a/configure.ac b/configure.ac index 8c5294087a..03224e7df7 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2122 +1,2123 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2023 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. 
dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08). dnl Once we can require that version, we can simplify this, and no longer dnl need ACLOCAL_AMFLAGS in Makefile.am. m4_ifdef([AC_CONFIG_MACRO_DIRS], [AC_CONFIG_MACRO_DIRS([m4])], [AC_CONFIG_MACRO_DIR([m4])]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([m4/version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" LT_CONFIG_LTDL_DIR([libltdl]) AC_CONFIG_AUX_DIR([libltdl/config]) AC_CANONICAL_HOST dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.13: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects]) dnl Require minimum version of pkg-config PKG_PROG_PKG_CONFIG(0.27) AS_IF([test x"${PKG_CONFIG}" != x""], [], [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.27 or later)])]) PKG_INSTALLDIR PKG_NOARCH_INSTALLDIR dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== dnl A particular compiler can be forced by setting the CC environment variable AC_PROG_CC dnl Use at least C99 if possible (automatic for autoconf >= 2.70) m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC]) dnl C++ is not needed for build, just maintainer utilities AC_PROG_CXX dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that dnl the C compiler is working. ... The core part of the gnulib checks are done dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL dnl as a dependency. gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT # --enable-new-dtags: Use RUNPATH instead of RPATH. # It is necessary to have this done before libtool does linker detection. 
# See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])]) AC_SUBST([AM_LDFLAGS]) saved_LDFLAGS="$LDFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING([whether $CC supports $@]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } # expand_path_option $path_variable_name $default expand_path_option() { # The first argument is the variable *name* (not value) ac_path_varname="$1" # Get the original value of the variable ac_path_value=$(eval echo "\${${ac_path_varname}}") # Expand any literal variable expressions in the value so that we don't # end up with something like '${prefix}' in #defines etc. # # Autoconf deliberately leaves values unexpanded to allow overriding # the configure script choices in make commands (for example, # "make exec_prefix=/foo install"). No longer being able to do this seems # like no great loss. eval ac_path_value=$(eval echo "${ac_path_value}") # Use (expanded) default if necessary AS_IF([test x"${ac_path_value}" = x""], [eval ac_path_value=$(eval echo "$2")]) # Require a full path AS_CASE(["$ac_path_value"], [/*], [eval ${ac_path_varname}="$ac_path_value"], [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])] ) } # yes_no_try $user_response $default DISABLED=0 REQUIRED=1 OPTIONAL=2 yes_no_try() { local value AS_IF([test x"$1" = x""], [value="$2"], [value="$1"]) AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"], [0|no|false|disable], [return $DISABLED], [1|yes|true|enable], [return $REQUIRED], [try|check], [return $OPTIONAL] ) AC_MSG_ERROR([Invalid option value "$value"]) } check_systemdsystemunitdir() { AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) test x"$systemdsystemunitdir" != x"" return $? } dnl =============================================== dnl Configure Options dnl =============================================== dnl Actual library checks come later, but pkg-config can be used here to grab dnl external values to use as defaults for configure options dnl Per the autoconf docs, --enable-*/--disable-* options should control dnl features inherent to Pacemaker, while --with-*/--without-* options should dnl control the use of external software. However, --enable-*/--disable-* may dnl implicitly require additional external dependencies, and dnl --with-*/--without-* may implicitly enable or disable features, so the dnl line is blurry. dnl dnl We also use --with-* options for custom file, directory, and path dnl locations, since autoconf does not provide an option type for those. dnl --enable-* options: build process AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])] ) yes_no_try "$enable_quiet" "no" enable_quiet=$? 
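The yes_no_try helper above normalizes a user-supplied option value into the DISABLED/REQUIRED/OPTIONAL tri-state that the rest of the script branches on. A minimal C sketch of the same mapping, for illustration only (the parse function is a hypothetical mirror of the shell helper; the real logic stays in configure):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* The same tri-state the configure script uses */
    enum tristate { DISABLED = 0, REQUIRED = 1, OPTIONAL = 2 };

    /* Hypothetical mirror of yes_no_try: fall back to the default when the
     * user gave no value, then match the accepted spellings caselessly. */
    static int yes_no_try(const char *value, const char *dflt)
    {
        const char *v = (value != NULL && value[0] != '\0') ? value : dflt;

        if (strcasecmp(v, "0") == 0 || strcasecmp(v, "no") == 0
            || strcasecmp(v, "false") == 0 || strcasecmp(v, "disable") == 0) {
            return DISABLED;
        }
        if (strcasecmp(v, "1") == 0 || strcasecmp(v, "yes") == 0
            || strcasecmp(v, "true") == 0 || strcasecmp(v, "enable") == 0) {
            return REQUIRED;
        }
        if (strcasecmp(v, "try") == 0 || strcasecmp(v, "check") == 0) {
            return OPTIONAL;
        }
        return -1;                  /* configure aborts via AC_MSG_ERROR here */
    }

    int main(void)
    {
        printf("%d\n", yes_no_try("", "try"));    /* 2: OPTIONAL (default used) */
        printf("%d\n", yes_no_try("Yes", "no"));  /* 1: REQUIRED */
        return 0;
    }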
AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@try@:>@])], ) yes_no_try "$enable_fatal_warnings" "try" enable_fatal_warnings=$? AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])] ) yes_no_try "$enable_hardening" "try" enable_hardening=$? dnl --enable-* options: features AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])] ) yes_no_try "$enable_systemd" "try" enable_systemd=$? AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart (deprecated) @<:@try@:>@])] ) yes_no_try "$enable_upstart" "try" enable_upstart=$? dnl --enable-* options: features inherent to Pacemaker AC_ARG_ENABLE([compat-2.0], [AS_HELP_STRING([--enable-compat-2.0], m4_normalize([ preserve certain output as it was in 2.0; this option will be available only for the lifetime of the 2.1 series @<:@no@:>@]))] ) yes_no_try "$enable_compat_2_0" "no" enable_compat_2_0=$? AS_IF([test $enable_compat_2_0 -ne $DISABLED], [ AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1], [Keep certain output compatible with 2.0 release series]) PCMK_FEATURES="$PCMK_FEATURES compat-2.0" ] ) # Add an option to create symlinks at the pre-2.0.0 daemon name locations, so # that users and tools can continue to invoke those names directly (e.g., for # meta-data). This option will be removed in a future release. AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names (deprecated) @<:@no@:>@])] ) yes_no_try "$enable_legacy_links" "no" enable_legacy_links=$? AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED]) # AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults # to enabled. We override the definition of AM_NLS to flip the default and mark # it as experimental in the help text. AC_DEFUN([AM_NLS], [AC_MSG_CHECKING([whether NLS is requested]) AC_ARG_ENABLE([nls], [AS_HELP_STRING([--enable-nls], [use Native Language Support (experimental)])], USE_NLS=$enableval, USE_NLS=no) AC_MSG_RESULT([$USE_NLS]) AC_SUBST([USE_NLS])] ) AM_GNU_GETTEXT([external]) AM_GNU_GETTEXT_VERSION([0.18]) AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"]) dnl --with-* options: external software support, and custom locations dnl This argument is defined via an M4 macro so default can be a variable AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACEMAKER_VERSION="$withval" ], [ PACEMAKER_VERSION="$PACKAGE_VERSION" ])] ) VERSION_ARG(VERSION_NUMBER) # Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case # the user used --with-version. Unfortunately, this can only affect the # substitution variables and later uses in this file, not the config.h # constants, so we have to be careful to use only PACEMAKER_VERSION in C code. 
PACKAGE_VERSION=$PACEMAKER_VERSION VERSION=$PACEMAKER_VERSION CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=URL], m4_normalize([ address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))], [ BUG_URL="$withval" ] ) dnl --with-* options: features AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets @<:@no@:>@])] ) yes_no_try "$with_cibsecrets" "no" with_cibsecrets=$? AC_ARG_WITH([gnutls], [AS_HELP_STRING([--with-gnutls], [support Pacemaker Remote and remote-tls-port using GnuTLS @<:@try@:>@])] ) yes_no_try "$with_gnutls" "try" with_gnutls=$? PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) AC_ARG_WITH([concurrent-fencing-default], [AS_HELP_STRING([--with-concurrent-fencing-default], [default value for concurrent-fencing cluster option @<:@false@:>@])], ) AS_CASE([$with_concurrent_fencing_default], [""], [with_concurrent_fencing_default="false"], [false], [], [true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"], [AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])] ) AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT], ["$with_concurrent_fencing_default"], [Default value for concurrent-fencing cluster option]) AC_ARG_WITH([sbd-sync-default], [AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([ default value used by sbd if SBD_SYNC_RESOURCE_STARTUP environment variable is not set @<:@false@:>@]))], ) AS_CASE([$with_sbd_sync_default], [""], [with_sbd_sync_default=false], [false], [], [true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"], [AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])] ) AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT], [$with_sbd_sync_default], [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable]) AC_ARG_WITH([resource-stickiness-default], [AS_HELP_STRING([--with-resource-stickiness-default], [If positive, value to add to new CIBs as explicit resource default for resource-stickiness @<:@0@:>@])], ) errmsg="Invalid value \"$with_resource_stickiness_default\" for --with-resource-stickiness-default" AS_CASE([$with_resource_stickiness_default], [0|""], [with_resource_stickiness_default="0"], [*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])], [PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"] ) AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT], [$with_resource_stickiness_default], [Default value for resource-stickiness resource meta-attribute]) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer @<:@try@:>@])] ) yes_no_try "$with_corosync" "try" with_corosync=$? dnl Get default from corosync if possible.
PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir], [], [PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"]) AC_ARG_WITH([corosync-conf], [AS_HELP_STRING([--with-corosync-conf], m4_normalize([ location of Corosync configuration file @<:@value from Corosync package if available otherwise SYSCONFDIR/corosync/corosync.conf@:>@]))], [ PCMK__COROSYNC_CONF="$withval" ] ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios resources])] ) yes_no_try "$with_nagios" "try" with_nagios=$? dnl --with-* options: directory locations AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users dnl have an older version, they can use our --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) dnl Get default from resource-agents if possible. Otherwise, the default uses dnl /usr/lib rather than libdir because it's determined by the OCF project and dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or dnl such, the OCF agents will be expected in their usual location. However, we dnl do give the user the option to override it. 
PKG_CHECK_VAR([OCF_ROOT_DIR], [resource-agents], [ocfrootdir], [], [OCF_ROOT_DIR="/usr/lib/ocf"]) AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], m4_normalize([ OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@value from resource-agents package if available otherwise /usr/lib/ocf@:>@]))], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"], [OCF root directory for resource agents and libraries]) PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [], [OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"]) AC_ARG_WITH([ocfrapath], [AS_HELP_STRING([--with-ocfrapath=DIR], m4_normalize([ OCF resource agent directories (colon-separated) to search @<:@value from resource-agents package if available otherwise OCFDIR/resource.d@:>@]))], [ OCF_RA_PATH="$withval" ] ) AC_SUBST(OCF_RA_PATH) OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d" AC_ARG_WITH([ocfrainstalldir], [AS_HELP_STRING([--with-ocfrainstalldir=DIR], m4_normalize([ OCF installation directory for Pacemaker's resource agents @<:@OCFDIR/resource.d@:>@]))], [ OCF_RA_INSTALL_DIR="$withval" ] ) AC_SUBST(OCF_RA_INSTALL_DIR) dnl Get default from fence-agents if available PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"], [PCMK__FENCE_BINDIR="$sbindir"]) AC_ARG_WITH([fence-bindir], [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([ directory for executable fence agents @<:@value from fence-agents package if available otherwise SBINDIR@:>@]))], [ PCMK__FENCE_BINDIR="$withval" ] ) AC_SUBST(PCMK__FENCE_BINDIR) dnl --with-* options: non-production testing AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations, for effective profiling @<:@no@:>@])] ) yes_no_try "$with_profiling" "no" with_profiling=$? AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations, for effective profiling and coverage testing @<:@no@:>@])] ) yes_no_try "$with_coverage" "no" with_coverage=$? AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])], [ SANITIZERS="$withval" ], [ SANITIZERS="" ]) dnl Environment variable options AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION", [Version number of this Pacemaker build]) PACKAGE_SERIES=`echo $VERSION | awk -F.
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_PROG_LN_S AC_PROG_MKDIR_P # Check for fatal warning support AS_IF([test $enable_fatal_warnings -ne $DISABLED && test x"$GCC" = x"yes" && cc_supports_flag -Werror], [WERROR="-Werror"], [ WERROR="" AS_CASE([$enable_fatal_warnings], [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])], [$OPTIONAL], [ AC_MSG_NOTICE([Compiler does not support fatal warnings]) enable_fatal_warnings=$DISABLED ]) ]) AC_MSG_NOTICE([Sanitizing prefix: ${prefix}]) AS_IF([test x"$prefix" = x"NONE"], [ prefix=/usr dnl Fix default variables - "prefix" variable if not specified AS_IF([test x"$localstatedir" = x"\${prefix}/var"], [localstatedir="/var"]) AS_IF([test x"$sysconfdir" = x"\${prefix}/etc"], [sysconfdir="/etc"]) ]) AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}]) AS_CASE([$exec_prefix], [prefix|NONE], [exec_prefix=$prefix]) AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}]) AS_CASE([$INITDIR], [prefix], [INITDIR=$prefix], [""], [ AC_MSG_CHECKING([which init (rc) directory to use]) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do AS_IF([test -d $initdir], [ INITDIR=$initdir break ]) done AC_MSG_RESULT([$INITDIR]) ]) AC_SUBST(INITDIR) AC_MSG_NOTICE([Sanitizing libdir: ${libdir}]) AS_CASE([$libdir], [prefix|NONE], [ AC_MSG_CHECKING([which lib directory to use]) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" AS_IF([test -d ${trydir}], [ libdir=${trydir} break ]) done AC_MSG_RESULT([$libdir]) ]) dnl Expand values of autoconf-provided directory options expand_path_option prefix expand_path_option exec_prefix expand_path_option bindir expand_path_option sbindir expand_path_option libexecdir expand_path_option datadir expand_path_option sysconfdir expand_path_option sharedstatedir expand_path_option localstatedir expand_path_option libdir expand_path_option includedir expand_path_option oldincludedir expand_path_option infodir expand_path_option mandir dnl Home-grown variables expand_path_option localedir "${datadir}/locale" AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs]) AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"]) expand_path_option runstatedir "${localstatedir}/run" AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) expand_path_option INITDIR AC_DEFINE_UNQUOTED([PCMK__LSB_INIT_DIR], ["$INITDIR"], [Location for LSB init scripts]) expand_path_option docdir "${datadir}/doc/${PACKAGE}-${VERSION}" AC_SUBST(docdir) expand_path_option CONFIGDIR "${sysconfdir}/sysconfig" AC_SUBST(CONFIGDIR) expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf" AC_SUBST(PCMK__COROSYNC_CONF) expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker" AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles" AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) expand_path_option PCMK__FENCE_BINDIR AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR", [Location for executable fence agents]) expand_path_option OCF_RA_PATH AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"], [OCF directories to search for resource agents ]) AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [], [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])]) 
AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) AS_IF([test x"${BUG_URL}" = x""], [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"]) AC_SUBST(BUG_URL) AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"], [Where bugs should be reported]) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR localedir do dirname=`eval echo '${'${j}'}'` AS_IF([test ! -d "$dirname"], [AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])]) done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([HAVE_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockpeercred; AC_DEFINE([HAVE_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include <sys/socket.h>]]) ], [[#define _GNU_SOURCE #include <sys/socket.h>]]) ], [], [[#include <sys/socket.h>]]) ]) AS_IF([test -z "${us_auth}"], [ # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([HAVE_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([HAVE_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeerucred(s, &ucred)]) ], [ AC_MSG_FAILURE([No way to authenticate a Unix socket peer]) ], [[#include <ucred.h>]]) ]) ]) dnl OS-based decision-making is poor autotools practice; feature-based dnl mechanisms are strongly preferred. Keep this section to a bare minimum; dnl regard as a "necessary evil". INIT_EXT="" PROCFS=0 dnl Solaris and some *BSD versions support procfs but not files we need AS_CASE(["$host_os"], [*bsd*], [INIT_EXT=".sh"], [*linux*], [PROCFS=1], [darwin*], [ LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ]) AC_SUBST(INIT_EXT) AM_CONDITIONAL([SUPPORT_PROCFS], [test $PROCFS -eq 1]) AC_DEFINE_UNQUOTED([HAVE_LINUX_PROCFS], [$PROCFS], [Define to 1 if procfs is supported]) AS_CASE(["$host_cpu"], [ppc64|powerpc64], [ AS_CASE([$CFLAGS], [*powerpc64*], [], [*], [AS_IF([test x"$GCC" = x"yes"], [CFLAGS="$CFLAGS -m64"]) ]) ]) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) dnl Require a minimum Python version AM_PATH_PYTHON([3.4]) AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog])
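The us_auth probe above picks one of four OS-specific ways to authenticate the peer of a Unix socket. As a concrete illustration of the Linux branch (HAVE_UCRED), here is a minimal, self-contained sketch of the getsockopt(SO_PEERCRED) call the define refers to; a socketpair stands in for a real client connection:

    #define _GNU_SOURCE         /* for struct ucred on glibc */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Ask the kernel who is on the other end of a connected AF_UNIX socket. */
    static int print_peer(int fd)
    {
        struct ucred peer;
        socklen_t len = sizeof(peer);

        if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) < 0) {
            perror("getsockopt(SO_PEERCRED)");
            return -1;
        }
        printf("peer pid=%ld uid=%ld gid=%ld\n",
               (long) peer.pid, (long) peer.uid, (long) peer.gid);
        return 0;
    }

    int main(void)
    {
        int sv[2];

        /* Both ends belong to this process, so it reports our own ids. */
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
            print_peer(sv[0]);
        }
        return 0;
    }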
dnl Bash is needed for building man pages and running regression tests. dnl BASH is already an environment variable, so use something else. AC_PATH_PROG([BASH_PATH], [bash]) AS_IF([test x"${BASH_PATH}" != x""], [], [AC_MSG_FAILURE([Could not find required build tool bash])]) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") AS_IF([test x"${HELP2MAN}" != x""], [PCMK_FEATURES="$PCMK_FEATURES generated-manpages"]) MANPAGE_XSLT="" AS_IF([test x"${XSLTPROC}" != x""], [ AC_MSG_CHECKING([for DocBook-to-manpage transform]) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') AS_IF([test x"${MANPAGE_XSLT}" = x""], [ DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d 2>/dev/null | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS} do AS_IF([test -f "${d}/${XSLT}"], [ MANPAGE_XSLT="${d}/${XSLT}" break ]) done ]) ]) AC_MSG_RESULT([$MANPAGE_XSLT]) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") AS_IF([test x"${MANPAGE_XSLT}" != x""], [PCMK_FEATURES="$PCMK_FEATURES agent-manpages"]) AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) AS_IF([test x"${ASCIIDOC_CONV}" != x""], [PCMK_FEATURES="$PCMK_FEATURES ascii-docs"]) AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""]) AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" AS_IF([test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH"], [ $GETOPT_PATH -T >/dev/null 2>/dev/null AS_IF([test $? -eq 4], [break]) ]) GETOPT_PATH="" done IFS=$IFS_orig AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])], [ AC_MSG_RESULT([no]) AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt]) ]) AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) PKG_CHECK_MODULES([UUID], [uuid], [CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}" LIBS="${LIBS} ${UUID_LIBS}"]) AC_CHECK_FUNCS([sched_setscheduler]) AS_IF([test x"$ac_cv_func_sched_setscheduler" != x"yes"], [PC_LIBS_RT=""], [PC_LIBS_RT="-lrt"]) AC_SUBST(PC_LIBS_RT) # Require minimum glib version PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # Check whether high-resolution sleep function is available AC_CHECK_FUNCS([nanosleep usleep]) # # Where is dlopen? # AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"], [LIBADD_DL=""], [test x"$ac_cv_lib_dl_dlopen" = x"yes"], [LIBADD_DL=-ldl], [LIBADD_DL=${lt_cv_dlopen_libs}]) PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) REQUIRE_LIB([xslt], [xsltApplyStylesheet]) dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) cc_temp_flags "$CFLAGS $WERROR" # Optional headers (inclusion of these should be conditional in C code) AC_CHECK_HEADERS([linux/swab.h]) AC_CHECK_HEADERS([stddef.h]) AC_CHECK_HEADERS([sys/signalfd.h]) AC_CHECK_HEADERS([uuid/uuid.h]) AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h]) # Required headers REQUIRE_HEADER([arpa/inet.h]) REQUIRE_HEADER([ctype.h]) REQUIRE_HEADER([dirent.h]) REQUIRE_HEADER([errno.h]) REQUIRE_HEADER([glib.h]) REQUIRE_HEADER([grp.h]) REQUIRE_HEADER([limits.h]) REQUIRE_HEADER([netdb.h]) REQUIRE_HEADER([netinet/in.h]) REQUIRE_HEADER([netinet/ip.h], [ #include <sys/types.h> #include <netinet/in.h> ]) REQUIRE_HEADER([pwd.h]) REQUIRE_HEADER([signal.h]) REQUIRE_HEADER([stdio.h]) REQUIRE_HEADER([stdlib.h]) REQUIRE_HEADER([string.h]) REQUIRE_HEADER([strings.h]) REQUIRE_HEADER([sys/ioctl.h]) REQUIRE_HEADER([sys/param.h]) REQUIRE_HEADER([sys/reboot.h]) REQUIRE_HEADER([sys/resource.h]) REQUIRE_HEADER([sys/socket.h]) REQUIRE_HEADER([sys/stat.h]) REQUIRE_HEADER([sys/time.h]) REQUIRE_HEADER([sys/types.h]) REQUIRE_HEADER([sys/utsname.h]) REQUIRE_HEADER([sys/wait.h]) REQUIRE_HEADER([time.h]) REQUIRE_HEADER([unistd.h]) REQUIRE_HEADER([libxml/xpath.h]) REQUIRE_HEADER([libxslt/xslt.h]) cc_restore_flags AC_CHECK_FUNCS([uuid_unparse], [], [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])]) AC_CACHE_CHECK([whether __progname and __progname_full are available], [pf_cv_var_progname], [AC_LINK_IFELSE( [AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]], [[__progname = "foo"; __progname_full = "foo bar";]])], [pf_cv_var_progname="yes"], [pf_cv_var_progname="no"] )] ) AS_IF([test x"$pf_cv_var_progname" = x"yes"], [AC_DEFINE(HAVE_PROGNAME,1,[Define to 1 if processes can change their name])]) dnl ======================================================================== dnl Generic declarations dnl ======================================================================== AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [], [[ #include <time.h> ]])
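The CLOCK_MONOTONIC check above feeds the "monotonic" feature flag, and the systemd, Upstart, and nagios sections later refuse support without it. A minimal illustration of what the declaration provides, namely a clock that wall-clock adjustments cannot move backwards:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;

        /* Unlike CLOCK_REALTIME, CLOCK_MONOTONIC is immune to wall-clock
         * steps (NTP corrections, date(1)), so it is safe for measuring
         * intervals such as operation timeouts. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
            perror("clock_gettime");
            return 1;
        }
        printf("monotonic: %ld.%09ld s since an arbitrary epoch\n",
               (long) ts.tv_sec, ts.tv_nsec);
        return 0;
    }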
dnl ======================================================================== dnl Unit test declarations dnl ======================================================================== AC_CHECK_DECLS([assert_float_equal], [], [], [[ #include <stdarg.h> #include <stddef.h> #include <setjmp.h> #include <cmocka.h> ]]) cc_temp_flags "$CFLAGS -Wl,--wrap=uname" WRAPPABLE_UNAME="no" AC_MSG_CHECKING([if uname() can be wrapped]) AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include <sys/utsname.h> int __wrap_uname(struct utsname *buf) { return 100; } int main(int argc, char **argv) { struct utsname x; return uname(&x) == 100 ? 0 : 1; } ]])], [ WRAPPABLE_UNAME="yes" ], [ WRAPPABLE_UNAME="no"]) AC_MSG_RESULT([$WRAPPABLE_UNAME]) AM_CONDITIONAL([WRAPPABLE_UNAME], [test x"$WRAPPABLE_UNAME" = x"yes"]) cc_restore_flags dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include <dirent.h>]) dnl ======================================================================== dnl Functions dnl ======================================================================== REQUIRE_FUNC([getopt]) REQUIRE_FUNC([setenv]) REQUIRE_FUNC([unsetenv]) REQUIRE_FUNC([vasprintf]) AC_CACHE_CHECK(whether sscanf supports %m, pf_cv_var_sscanf, AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include <stdio.h> const char *s = "some-command-line-arg"; int main(int argc, char **argv) { char *name = NULL; int n = sscanf(s, "%ms", &name); return n == 1 ? 0 : 1; } ]])], pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) AS_IF([test x"$pf_cv_var_sscanf" = x"yes"], [AC_DEFINE([HAVE_SSCANF_M], [1], [Define to 1 if sscanf %m modifier is available])]) dnl ======================================================================== dnl bzip2 dnl ======================================================================== REQUIRE_HEADER([bzlib.h]) REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress]) dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]], [[sighandler_t *f;]])], [ AC_MSG_RESULT([yes]) AC_DEFINE([HAVE_SIGHANDLER_T], [1], [Define to 1 if sighandler_t is available]) ], [AC_MSG_RESULT([no])]) dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurses takes precedence. dnl AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h])
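One note on the %m probe above: %ms is the POSIX.1-2008 assignment-allocation modifier, where sscanf allocates the output string itself, so the caller owns and must free it (the exit-code-only test program skips the free). A short usage sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *name = NULL;

        /* With the %m modifier, sscanf malloc()s a buffer big enough for
         * the conversion and stores the pointer in 'name'. */
        if (sscanf("some-command-line-arg", "%ms", &name) == 1) {
            printf("scanned: %s\n", name);
            free(name);  /* caller owns the allocation */
        }
        return 0;
    }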
dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" AS_IF([test x"$ac_cv_header_ncurses_h" = x"yes"], [ AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" ]) AS_IF([test x"$ac_cv_header_ncurses_ncurses_h" = x"yes"], [ AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" ]) dnl Only look for non-n-library if there was no n-library. AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_h" = x"yes"], [ AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" ]) dnl Only look for non-n-library if there was no n-library. AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_curses_h" = x"yes"], [ AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" ]) AS_IF([test x"$CURSESLIBS" != x""], [PCMK_FEATURES="$PCMK_FEATURES ncurses"]) dnl Check for printw() prototype compatibility AS_IF([test x"$CURSESLIBS" != x"" && cc_supports_flag -Wcast-qual], [ ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 AS_IF([cc_supports_flag -fPIC], [cc_temp_flags "-Wcast-qual $WERROR -fPIC"], [cc_temp_flags "-Wcast-qual $WERROR"]) AC_MSG_CHECKING([whether curses library is compatible]) AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include <ncurses.h> #elif defined(HAVE_NCURSES_NCURSES_H) # include <ncurses/ncurses.h> #elif defined(HAVE_CURSES_H) # include <curses.h> #endif ], [printw((const char *)"Test");] )], [AC_MSG_RESULT([yes])], [ AC_MSG_RESULT([no]) AC_MSG_WARN(m4_normalize([Disabling curses because the printw() function of your (n)curses library is old.
If you wish to enable curses, update to a newer version (ncurses 5.4 or later is recommended, available from https://invisible-island.net/ncurses/) ])) AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1], [Define to 1 if curses library has incompatible printw()]) ] ) LIBS=$ac_save_LIBS cc_restore_flags ]) AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== CFLAGS_ORIG="$CFLAGS" AS_IF([test $with_coverage -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ] ) AS_IF([test $with_profiling -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES profile" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin" dnl CFLAGS="$CFLAGS -fno-inline-functions" dnl CFLAGS="$CFLAGS -fno-default-inline" dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once" dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed \ -e 's/-O.\ //g' \ -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \ -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG]) AC_MSG_NOTICE([CFLAGS after: $CFLAGS]) ] ) AC_DEFINE_UNQUOTED([SUPPORT_PROFILING], [$with_profiling], [Support profiling]) AM_CONDITIONAL([BUILD_PROFILING], [test "$with_profiling" = "$REQUIRED"]) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== PKG_CHECK_MODULES(libqb, libqb >= 0.17) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 2.0.5+ (2022-03) AC_CHECK_FUNCS([qb_ipcc_connect_async]) dnl libqb 2.0.2+ (2020-10) AC_CHECK_FUNCS([qb_ipcc_auth_get]) dnl libqb 2.0.0+ (2020-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available AS_IF([test x"$cross_compiling" != x"yes"], [CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"]) AC_CHECK_HEADERS([stonith/stonith.h], [ AC_CHECK_LIB([pils], [PILLoadPlugin]) AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel]) PCMK_FEATURES="$PCMK_FEATURES lha" ]) AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"], [Directory Pacemaker daemons should change to (without systemd, core files will go here)]) AC_SUBST(CRM_CORE_DIR) AS_IF([test x"${CRM_DAEMON_USER}" = x""], [CRM_DAEMON_USER="hacluster"]) AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) AS_IF([test x"${CRM_DAEMON_GROUP}" = x""], [CRM_DAEMON_GROUP="haclient"]) AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker 
daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries]) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING([build version]) BUILD_VERSION=$Format:%h$ AS_IF([test $BUILD_VERSION != ":%h$"], [AC_MSG_RESULT([$BUILD_VERSION (archive hash)])], [test -x $GIT && test -d .git], [ BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT([$BUILD_VERSION (git hash)]) ], [ # The current directory name makes a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT([$BUILD_VERSION (directory name)]) ]) AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"], [HAVE_dbus=0]) AC_DEFINE_UNQUOTED(HAVE_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) dnl libdbus 1.5.12+ (2012-03) / 1.6.0+ (2012-06) AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]]) AS_IF([test $HAVE_dbus = 0], [PC_NAME_DBUS=""], [PC_NAME_DBUS="dbus-1"]) AC_SUBST(PC_NAME_DBUS) AS_CASE([$enable_systemd], [$REQUIRED], [ AS_IF([test $HAVE_dbus = 0], [AC_MSG_FAILURE([Cannot support systemd resources without DBus])]) AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"], [AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])]) AS_IF([check_systemdsystemunitdir], [], [AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])]) ], [$OPTIONAL], [ AS_IF([test $HAVE_dbus = 0 \ || test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [enable_systemd=$DISABLED], [ AC_MSG_CHECKING([for systemd version (using dbus-send)]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "version unavailable"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) AS_IF([test
x"$ret" != x"unavailable" \ || systemctl --version 2>/dev/null | grep -q systemd], [ AS_IF([check_systemdsystemunitdir], [enable_systemd=$REQUIRED], [enable_systemd=$DISABLED]) ], [enable_systemd=$DISABLED] ) ]) ], ) AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])], [ AC_MSG_RESULT([yes]) PCMK_FEATURES="$PCMK_FEATURES systemd" ] ) AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd], [Support systemd resources]) AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED]) AC_SUBST(SUPPORT_SYSTEMD) AS_CASE([$enable_upstart], [$REQUIRED], [ AS_IF([test $HAVE_dbus = 0], [AC_MSG_FAILURE([Cannot support Upstart resources without DBus])]) ], [$OPTIONAL], [ AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED], [ AC_MSG_CHECKING([for Upstart version (using dbus-send)]) ret=$({ dbus-send --system --print-reply \ --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "version unavailable"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) AS_IF([test x"$ret" != x"unavailable" \ || initctl --version 2>/dev/null | grep -q upstart], [enable_upstart=$REQUIRED], [enable_upstart=$DISABLED] ) ]) ], ) AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])], [ AC_MSG_RESULT([yes]) PCMK_FEATURES="$PCMK_FEATURES upstart" ] ) AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart], [Support Upstart resources]) AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED]) AC_SUBST(SUPPORT_UPSTART) AS_CASE([$with_nagios], [$REQUIRED], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])]) ], [$OPTIONAL], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [with_nagios=$DISABLED], [with_nagios=$REQUIRED]) ] ) AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"]) AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins]) AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED]) AS_IF([test x"$NAGIOS_PLUGIN_DIR" = x""], [NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"]) AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) AS_IF([test x"$NAGIOS_METADATA_DIR" = x""], [NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"]) AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== COROSYNC_LIBS="" AS_CASE([$with_corosync], [$REQUIRED], [ # These will be fatal if unavailable PKG_CHECK_MODULES([cpg], [libcpg]) PKG_CHECK_MODULES([cfg], [libcfg]) PKG_CHECK_MODULES([cmap], [libcmap]) PKG_CHECK_MODULES([quorum], [libquorum]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common]) ] [$OPTIONAL], [ PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED]) 
PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED]) AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED]) ] ) AS_IF([test $with_corosync -ne $DISABLED], [ AC_MSG_CHECKING([for Corosync 2 or later]) AC_MSG_RESULT([yes]) CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" CPPFLAGS="$CPPFLAGS `$PKG_CONFIG --cflags-only-I corosync`" COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_NAME_CLUSTER libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-ge-2" dnl Shutdown tracking added (back) to corosync Jan 2021 saved_LIBS="$LIBS" LIBS="$LIBS $COROSYNC_LIBS" AC_CHECK_FUNCS([corosync_cfg_trackstart]) LIBS="$saved_LIBS" ] ) AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync], [Support the Corosync messaging and membership layer]) AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED]) AC_SUBST([SUPPORT_COROSYNC]) dnl dnl Cluster stack - Sanity dnl AS_IF([test x"$STACKS" != x""], [AC_MSG_NOTICE([Supported stacks:${STACKS}])], [AC_MSG_FAILURE([At least one cluster stack must be supported])]) PCMK_FEATURES="${PCMK_FEATURES}${STACKS}" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== AS_IF([test $with_cibsecrets -ne $DISABLED], [ with_cibsecrets=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED([LRM_CIBSECRETS_DIR], ["$LRM_CIBSECRETS_DIR"], [Location for CIB secrets]) AC_SUBST([LRM_CIBSECRETS_DIR]) ] ) AC_DEFINE_UNQUOTED([SUPPORT_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets]) AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED]) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl Require GnuTLS >=2.12.0 (2011-03) for Pacemaker Remote support PC_NAME_GNUTLS="" AS_CASE([$with_gnutls], [$REQUIRED], [ REQUIRE_LIB([gnutls], [gnutls_sec_param_to_pk_bits]) REQUIRE_HEADER([gnutls/gnutls.h]) ], [$OPTIONAL], [ AC_CHECK_LIB([gnutls], [gnutls_sec_param_to_pk_bits], [], [with_gnutls=$DISABLED]) AC_CHECK_HEADERS([gnutls/gnutls.h], [], [with_gnutls=$DISABLED]) ] ) AS_IF([test $with_gnutls -ne $DISABLED], [ PC_NAME_GNUTLS="gnutls" PCMK_FEATURES="$PCMK_FEATURES remote" ] ) AC_SUBST([PC_NAME_GNUTLS]) AM_CONDITIONAL([BUILD_REMOTE], [test $with_gnutls -ne $DISABLED])
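On the GnuTLS probe above: gnutls_sec_param_to_pk_bits() was introduced in GnuTLS 2.12, which is why linking against that particular symbol doubles as the version check. A minimal sketch of the call (build with -lgnutls):

    #include <stdio.h>
    #include <gnutls/gnutls.h>

    int main(void)
    {
        /* The symbol configure links against: map an abstract security
         * level to a concrete RSA key size. Present only in GnuTLS >= 2.12,
         * so a successful link implies a new-enough library. */
        unsigned int bits = gnutls_sec_param_to_pk_bits(GNUTLS_PK_RSA,
                                                        GNUTLS_SEC_PARAM_MEDIUM);
        printf("RSA bits for GNUTLS_SEC_PARAM_MEDIUM: %u\n", bits);
        return 0;
    }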
AS_IF([test -n "${SANITIZERS}"], [ SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g') for SANITIZER in $SANITIZERS do AS_CASE([$SANITIZER], [asan|ASAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" PCMK_FEATURES="$PCMK_FEATURES asan" REQUIRE_LIB([asan],[main]) ], [ubsan|UBSAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" PCMK_FEATURES="$PCMK_FEATURES ubsan" REQUIRE_LIB([ubsan],[main]) ], [tsan|TSAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" PCMK_FEATURES="$PCMK_FEATURES tsan" REQUIRE_LIB([tsan],[main]) ]) done ]) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment, then this should have dnl no effect. However, if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. AS_IF([export | fgrep " CFLAGS=" > /dev/null], [ SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS ]) CC_EXTRAS="" AS_IF([test x"$GCC" != x"yes"], [CFLAGS="$CFLAGS -g"], [ CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose, dnl so we disable nonliteral format checking globally, as we abort on warnings. dnl What makes things really ugly is that nonliteral format checking is dnl available as a separate switch in very modern gcc, but for older gcc it is dnl part of -Wformat=2. dnl So: if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral; dnl if we don't have push/pull but do have -Wformat-nonliteral, we can enable dnl -Wformat=2; otherwise we can enable neither. gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags AS_IF([cc_supports_flag "-Wformat-nonliteral"], [gcc_format_nonliteral=yes], [gcc_format_nonliteral=no]) # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline" EXTRA_FLAGS="$EXTRA_FLAGS -Wall" EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return" EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast" EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align" EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement" EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels" EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal" EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security" EXTRA_FLAGS="$EXTRA_FLAGS -Wimplicit-fallthrough" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations" EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing" EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith" EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings" EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable" EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char" AS_IF([test x"$gcc_diagnostic_push_pull" = x"yes"], [ AC_DEFINE([HAVE_FORMAT_NONLITERAL], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" ], [test x"$gcc_format_nonliteral" = x"yes"], [EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"]) # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"]) done AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}]) ]) dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl pacemaker-based, pacemaker-fenced, pacemaker-remoted, dnl pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. 
dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl AS_IF([test $enable_hardening -eq $OPTIONAL], [ AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0], [enable_hardening=$REQUIRED], [AC_MSG_NOTICE([Hardening: using custom flags from environment])] ) ], [ unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB ] ) AS_CASE([$enable_hardening], [$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])], [$REQUIRED], [ CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 stackprot="none" # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" relro=1 ]) # daemons: PIE for both CFLAGS and LDFLAGS AS_IF([cc_supports_flag -fPIE], [ flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [ CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE" LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" pie=1 ]) ] ) # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries AS_IF([test "${relro}" = 1 && test "${pie}" = 1], [ flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" bindnow=1 ]) ] ) AS_IF([test "${bindnow}" = 1], [ flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" ]) ]) # universal: prefer strong > all > default stack protector if possible flag= AS_IF([cc_supports_flag -fstack-protector-strong], [ flag="-fstack-protector-strong" stackprot="strong" ], [cc_supports_flag -fstack-protector-all], [ flag="-fstack-protector-all" stackprot="all" ], [cc_supports_flag -fstack-protector], [ flag="-fstack-protector" stackprot="default" ] ) AS_IF([test -n "${flag}"], [CC_EXTRAS="${CC_EXTRAS} ${flag}"]) # universal: enable stack clash protection if possible AS_IF([cc_supports_flag -fstack-clash-protection], [ CC_EXTRAS="${CC_EXTRAS} -fstack-clash-protection" AS_IF([test "${stackprot}" = "none"], [stackprot="clash-only"], [stackprot="${stackprot}+clash"] ) ] ) # Log a summary AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test x"${stackprot}" != x"none"], [AC_MSG_NOTICE(m4_normalize([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${stackprot}])) ], [AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])] ) ], ) CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
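dnl (Illustration, assuming stock autoconf behavior: a probe such as
dnl AC_CHECK_FUNCS([foo]) compiles a small conftest program that declares
dnl     char foo ();
dnl without a prototype; warnings like -Wstrict-prototypes combined with
dnl -Werror would turn that probe into a compile failure, and the function
dnl would be misreported as missing.)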
dnl dnl AS_IF([test $enable_fatal_warnings -ne $DISABLED], [ AC_MSG_NOTICE([Enabling fatal compiler warnings]) CFLAGS="$CFLAGS $WERROR" ]) AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output AS_IF([test $enable_quiet -ne $DISABLED], [ AC_MSG_NOTICE([Suppressing make details]) QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant ], [ QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" ] ) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" # Make features list available (sorted alphabetically, without leading space) PCMK_FEATURES=`echo "$PCMK_FEATURES" | sed -e 's/^ //' -e 's/ /\n/g' | sort | xargs` AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) dnl Files we output that need to be executable -CONFIG_FILES_EXEC([cts/cts-cli], +CONFIG_FILES_EXEC([cts/cts-attrd], + [cts/cts-cli], [cts/cts-exec], [cts/cts-fencing], [cts/cts-regression], [cts/cts-scheduler], [cts/lxc_autogen.sh], [cts/benchmark/clubench], [cts/lab/CTSlab.py], [cts/lab/OCFIPraTest.py], [cts/lab/cluster_test], [cts/lab/cts], [cts/lab/cts-log-watcher], [cts/support/LSBDummy], [cts/support/cts-support], [cts/support/fence_dummy], [cts/support/pacemaker-cts-dummyd], [daemons/fenced/fence_legacy], [daemons/fenced/fence_watchdog], [doc/abi-check], [extra/resources/ClusterMon], [extra/resources/HealthSMART], [extra/resources/SysInfo], [extra/resources/controld], [extra/resources/ifspeed], [extra/resources/o2cb], [maint/bumplibs], [tools/crm_failcount], [tools/crm_master], [tools/crm_report], [tools/crm_standby], [tools/cibsecret], [tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/benchmark/Makefile \ cts/lab/Makefile \ cts/scheduler/Makefile \ cts/scheduler/dot/Makefile \ cts/scheduler/exp/Makefile \ cts/scheduler/scores/Makefile \ cts/scheduler/stderr/Makefile \ cts/scheduler/summary/Makefile \ cts/scheduler/xml/Makefile \ cts/support/Makefile \ cts/support/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker.combined.upstart \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/sphinx/Makefile \ etc/Makefile \ etc/init.d/pacemaker \ etc/logrotate.d/pacemaker \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ lib/Makefile \ lib/cib/Makefile \ lib/cluster/Makefile \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/acl/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/health/Makefile \ lib/common/tests/io/Makefile \ lib/common/tests/iso8601/Makefile \ 
lib/common/tests/lists/Makefile \ lib/common/tests/nvpair/Makefile \ lib/common/tests/operations/Makefile \ lib/common/tests/options/Makefile \ lib/common/tests/output/Makefile \ lib/common/tests/procfs/Makefile \ lib/common/tests/results/Makefile \ lib/common/tests/scores/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/common/tests/xml/Makefile \ lib/common/tests/xpath/Makefile \ lib/fencing/Makefile \ lib/gnu/Makefile \ lib/libpacemaker.pc \ lib/lrmd/Makefile \ lib/pacemaker/Makefile \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-cluster.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/native/Makefile \ lib/pengine/tests/rules/Makefile \ lib/pengine/tests/status/Makefile \ lib/pengine/tests/unpack/Makefile \ lib/pengine/tests/utils/Makefile \ lib/services/Makefile \ maint/Makefile \ po/Makefile.in \ python/Makefile \ python/setup.py \ python/pacemaker/Makefile \ python/pacemaker/_cts/Makefile \ python/pacemaker/buildoptions.py \ python/tests/Makefile \ replace/Makefile \ rpm/Makefile \ tests/Makefile \ tools/Makefile \ tools/crm_mon.service \ tools/crm_mon.upstart \ tools/report.collector \ tools/report.common \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_NOTICE([]) AC_MSG_NOTICE([$PACKAGE configuration:]) AC_MSG_NOTICE([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_NOTICE([ Features = ${PCMK_FEATURES}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ Prefix = ${prefix}]) AC_MSG_NOTICE([ Executables = ${sbindir}]) AC_MSG_NOTICE([ Man pages = ${mandir}]) AC_MSG_NOTICE([ Libraries = ${libdir}]) AC_MSG_NOTICE([ Header files = ${includedir}]) AC_MSG_NOTICE([ Arch-independent files = ${datadir}]) AC_MSG_NOTICE([ State information = ${localstatedir}]) AC_MSG_NOTICE([ System configuration = ${sysconfdir}]) AC_MSG_NOTICE([ OCF agents = ${OCF_ROOT_DIR}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_NOTICE([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ CFLAGS = ${CFLAGS}]) AC_MSG_NOTICE([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_NOTICE([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_NOTICE([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_NOTICE([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_NOTICE([ Libraries = ${LIBS}]) AC_MSG_NOTICE([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_NOTICE([ Unix socket auth method = ${us_auth}]) diff --git a/cts/Makefile.am b/cts/Makefile.am index c49e0145c3..a2e67384e7 100644 --- a/cts/Makefile.am +++ b/cts/Makefile.am @@ -1,68 +1,69 @@ # -# Copyright 2001-2022 the Pacemaker project contributors +# Copyright 2001-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # MAINTAINERCLEANFILES = Makefile.in # Test commands and globally applicable test files should be in $(testdir), # and command-specific test data should be in a command-specific subdirectory. 
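# (For example, cts-cli below installs to $(testdir) itself, while its data
# files install to the command-specific $(testdir)/cli via clidir.)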
testdir = $(datadir)/$(PACKAGE)/tests -test_SCRIPTS = cts-cli \ +test_SCRIPTS = cts-attrd \ + cts-cli \ cts-exec \ cts-fencing \ cts-regression \ cts-scheduler dist_test_DATA = README.md \ valgrind-pcmk.suppressions ctsdir = $(testdir)/cts cts_SCRIPTS = lxc_autogen.sh clidir = $(testdir)/cli dist_cli_DATA = cli/constraints.xml \ cli/crmadmin-cluster-remote-guest-nodes.xml \ cli/crm_diff_new.xml \ cli/crm_diff_old.xml \ cli/crm_mon.xml \ cli/crm_mon-feature_set.xml \ cli/crm_mon-partial.xml \ cli/crm_mon-rsc-maint.xml \ cli/crm_mon-T180.xml \ cli/crm_mon-unmanaged.xml \ cli/crm_resource_digests.xml \ cli/regression.acls.exp \ cli/regression.crm_mon.exp \ cli/regression.daemons.exp \ cli/regression.dates.exp \ cli/regression.error_codes.exp \ cli/regression.feature_set.exp \ cli/regression.rules.exp \ cli/regression.tools.exp \ cli/regression.upgrade.exp \ cli/regression.validity.exp \ cli/regression.access_render.exp scheduler-list: @for T in "$(srcdir)"/scheduler/xml/*.xml; do \ echo $$(basename $$T .xml); \ done CLEANFILES = $(builddir)/.regression.failed.diff clean-local: rm -f scheduler/*/*.pe SUBDIRS = benchmark lab scheduler support cts-support-install: $(MAKE) $(AM_MAKEFLAGS) -C support cts-support $(builddir)/support/cts-support install cts-support-uninstall: $(MAKE) $(AM_MAKEFLAGS) -C support cts-support $(builddir)/support/cts-support uninstall diff --git a/cts/cts-attrd.in b/cts/cts-attrd.in new file mode 100644 index 0000000000..28d963959c --- /dev/null +++ b/cts/cts-attrd.in @@ -0,0 +1,324 @@ +#!@PYTHON@ +""" Regression tests for Pacemaker's attribute daemon """ + +# pylint doesn't like the module name "cts-attrd", which is an invalid complaint for this file +# but probably something we want to continue warning about elsewhere +# pylint: disable=invalid-name +# pacemaker imports need to come after we modify sys.path, which pylint will complain about. +# pylint: disable=wrong-import-position + +__copyright__ = "Copyright 2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +import argparse +import os +import subprocess +import sys +import tempfile + +# These imports allow running from a source checkout after running `make`. +# Note that this doesn't necessarily mean the tests will run successfully, +# but being able to see --help output can be useful.
+if os.path.exists("@abs_top_srcdir@/python"): + sys.path.insert(0, "@abs_top_srcdir@/python") + +# pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant +if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@": + sys.path.insert(0, "@abs_top_builddir@/python") + +from pacemaker.buildoptions import BuildOptions +from pacemaker.exitstatus import ExitStatus +from pacemaker._cts.corosync import Corosync +from pacemaker._cts.process import killall, exit_if_proc_running +from pacemaker._cts.test import Test, Tests + +TEST_DIR = sys.path[0] + +def update_path(): + """ Set the PATH environment variable appropriately for the tests """ + + new_path = os.environ['PATH'] + if os.path.exists("%s/cts-attrd.in" % TEST_DIR): + # pylint: disable=protected-access + print("Running tests from the source tree: %s (%s)" % (BuildOptions._BUILD_DIR, TEST_DIR)) + # For pacemaker-attrd + new_path = "%s/daemons/attrd:%s" % (BuildOptions._BUILD_DIR, new_path) + + else: + print("Running tests from the install tree: %s (not %s)" % (BuildOptions.DAEMON_DIR, TEST_DIR)) + # For pacemaker-attrd + new_path = "%s:%s" % (BuildOptions.DAEMON_DIR, new_path) + + print('Using PATH="%s"' % new_path) + os.environ['PATH'] = new_path + + +class AttributeTest(Test): + """ Executor for a single test """ + + def __init__(self, name, description, **kwargs): + Test.__init__(self, name, description, **kwargs) + + self._daemon_location = "pacemaker-attrd" + self._enable_corosync = True + + def _kill_daemons(self): + killall([self._daemon_location]) + + def _start_daemons(self): + if self.verbose: + print("Starting %s" % self._daemon_location) + + cmd = [self._daemon_location, "-s", "-l", self.logpath] + # pylint: disable=consider-using-with + self._daemon_process = subprocess.Popen(cmd) + + +class AttributeTests(Tests): + """ Collection of all attribute regression tests """ + + def __init__(self, **kwargs): + Tests.__init__(self, **kwargs) + + self._corosync = Corosync(self.verbose, self.logdir, "cts-attrd") + + def new_test(self, name, description): + """ Create a named test """ + + test = AttributeTest(name, description, verbose=self.verbose, logdir=self.logdir) + self._tests.append(test) + return test + + def setup_environment(self, use_corosync): + """ Prepare the host before executing any tests """ + + if use_corosync: + self._corosync.start(kill_first=True) + + def cleanup_environment(self, use_corosync): + """ Clean up the host after executing desired tests """ + + if use_corosync: + self._corosync.stop() + + def build_basic_tests(self): + """ Add basic tests - setting, querying, updating, and deleting attributes """ + + test = self.new_test("set_attr_1", + "Set and query an attribute") + test.add_cmd("attrd_updater", "--name AAA -U 111 --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml", + "name=\"AAA\" value=\"111\"") + test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111", + regex=True) + + # Setting the delay on an attribute that doesn't exist fails, but the failure is + # not passed back to attrd_updater. 
+ test = self.new_test("set_attr_2", + "Set an attribute's delay") + test.add_cmd("attrd_updater", "--name AAA -Y -d 5 --output-as=xml") + test.add_log_pattern(r"Processed update-delay request from client .*: Error \(Attribute AAA does not exist\)", + regex=True) + + test = self.new_test("set_attr_3", + "Set and query an attribute's delay and value") + test.add_cmd("attrd_updater", "--name AAA -B 111 -d 5 --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml", + "name=\"AAA\" value=\"111\"") + test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111 | from .* with 5s write delay", + regex=True) + + test = self.new_test("update_attr_1", + "Update an attribute that already exists") + test.add_cmd("attrd_updater", "--name BBB -U 222 --output-as=xml") + test.add_cmd("attrd_updater", "--name BBB -U 333 --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name BBB -Q --output-as=xml", + "name=\"BBB\" value=\"333\"") + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: \(unset\) -> 222", + regex=True) + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: 222 -> 333", + regex=True) + + test = self.new_test("update_attr_delay_1", + "Update the delay of an attribute that already exists") + test.add_cmd("attrd_updater", "--name BBB -U 222 --output-as=xml") + test.add_cmd("attrd_updater", "--name BBB -Y -d 5 --output-as=xml") + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: \(unset\) -> 222", + regex=True) + test.add_log_pattern("Update attribute BBB delay to 5000ms (5)") + + test = self.new_test("update_attr_delay_2", + "Update the delay and value of an attribute that already exists") + test.add_cmd("attrd_updater", "--name BBB -U 222 --output-as=xml") + test.add_cmd("attrd_updater", "--name BBB -B 333 -d 5 --output-as=xml") + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: \(unset\) -> 222", + regex=True) + test.add_log_pattern("Update attribute BBB delay to 5000ms (5)") + test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: 222 -> 333", + regex=True) + + test = self.new_test("missing_attr_1", + "Query an attribute that does not exist") + test.add_cmd_expected_fail("attrd_updater", "--name NOSUCH --output-as=xml", + ExitStatus.CONFIG) + + test = self.new_test("delete_attr_1", + "Delete an existing attribute") + test.add_cmd("attrd_updater", "--name CCC -U 444 --output-as=xml") + test.add_cmd("attrd_updater", "--name CCC -D --output-as=xml") + test.add_log_pattern(r"Setting CCC\[.*\] in instance_attributes: \(unset\) -> 444", + regex=True) + test.add_log_pattern(r"Setting CCC\[.*\] in instance_attributes: 444 -> \(unset\)", + regex=True) + + test = self.new_test("missing_attr_2", + "Delete an attribute that does not exist") + test.add_cmd("attrd_updater", "--name NOSUCH2 -D --output-as=xml") + + test = self.new_test("attr_in_set_1", + "Set and query an attribute in a specific set") + test.add_cmd("attrd_updater", "--name DDD -U 555 --set=foo --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name DDD -Q --output-as=xml", + "name=\"DDD\" value=\"555\"") + test.add_log_pattern("Processed 1 private change for DDD, id=n/a, set=foo") + + def build_regex_tests(self): + """ Add tests that use regexes """ + + test = self.new_test("regex_update_1", + "Update attributes using a regex") + test.add_cmd("attrd_updater", "--name AAA -U 111 --output-as=xml") + test.add_cmd("attrd_updater", "--name ABB -U 222 --output-as=xml") + 
test.add_cmd("attrd_updater", "-P 'A.*' -U 333 --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml", + "name=\"AAA\" value=\"333\"") + test.add_cmd_check_stdout("attrd_updater", "--name ABB -Q --output-as=xml", + "name=\"ABB\" value=\"333\"") + test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111", + regex=True) + test.add_log_pattern(r"Setting ABB\[.*\] in instance_attributes: \(unset\) -> 222", + regex=True) + test.add_log_pattern(r"Setting ABB\[.*\] in instance_attributes: 222 -> 333", + regex=True) + test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: 111 -> 333", + regex=True) + + test = self.new_test("regex_delete_1", + "Delete attributes using a regex") + test.add_cmd("attrd_updater", "--name XAX -U 444 --output-as=xml") + test.add_cmd("attrd_updater", "--name XBX -U 555 --output-as=xml") + test.add_cmd("attrd_updater", "-P 'X[A|B]X' -D --output-as=xml") + test.add_log_pattern(r"Setting XAX\[.*\] in instance_attributes: \(unset\) -> 444", + regex=True) + test.add_log_pattern(r"Setting XBX\[.*\] in instance_attributes: \(unset\) -> 555", + regex=True) + test.add_log_pattern(r"Setting XBX\[.*\] in instance_attributes: 555 -> \(unset\)", + regex=True) + test.add_log_pattern(r"Setting XAX\[.*\] in instance_attributes: 444 -> \(unset\)", + regex=True) + + def build_utilization_tests(self): + """ Add tests that involve utilization attributes """ + + test = self.new_test("utilization_1", + "Set and query a utilization attribute") + test.add_cmd("attrd_updater", "--name AAA -U ABC -z --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml", + "name=\"AAA\" value=\"ABC\"") + test.add_log_pattern(r"Setting AAA\[.*\] in utilization: \(unset\) -> ABC", + regex=True) + + def build_sync_point_tests(self): + """ Add tests that involve sync points """ + + test = self.new_test("local_sync_point", + "Wait for a local sync point") + test.add_cmd("attrd_updater", "--name AAA -U 123 --wait=local --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml", + "name=\"AAA\" value=\"123\"") + test.add_log_pattern(r"Alerting client .* for reached local sync point", + regex=True) + + test = self.new_test("cluster_sync_point", + "Wait for a cluster-wide sync point") + test.add_cmd("attrd_updater", "--name BBB -U 456 --wait=cluster --output-as=xml") + test.add_cmd_check_stdout("attrd_updater", "--name BBB -Q --output-as=xml", + "name=\"BBB\" value=\"456\"") + test.add_log_pattern(r"Alerting client .* for reached cluster sync point", + regex=True) + + +def build_options(): + """ Handle command line arguments """ + + parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, + description="Run pacemaker-attrd regression tests", + epilog="Example: Run only the test 'start_stop'\n" + "\t " + sys.argv[0] + " --run-only start_stop\n\n" + "Example: Run only the tests with the string 'systemd' present in them\n" + "\t " + sys.argv[0] + " --run-only-pattern systemd") + parser.add_argument("-l", "--list-tests", action="store_true", + help="Print out all registered tests") + parser.add_argument("-p", "--run-only-pattern", metavar='PATTERN', + help="Run only tests matching the given pattern") + parser.add_argument("-r", "--run-only", metavar='TEST', + help="Run a specific test") + parser.add_argument("-V", "--verbose", action="store_true", + help="Verbose output") + + args = parser.parse_args() + return args + + +def main(): + """ Run attrd 
regression tests as specified by arguments """ + + update_path() + + # Ensure all command output is in portable locale for comparison + os.environ['LC_ALL'] = "C" + + opts = build_options() + + exit_if_proc_running("pacemaker-attrd") + + # Create a temporary directory for log files (the directory and its + # contents will automatically be erased when done) + with tempfile.TemporaryDirectory(prefix="cts-attrd-") as logdir: + tests = AttributeTests(verbose=opts.verbose, logdir=logdir) + + tests.build_basic_tests() + tests.build_regex_tests() + tests.build_utilization_tests() + tests.build_sync_point_tests() + + if opts.list_tests: + tests.print_list() + sys.exit(ExitStatus.OK) + + print("Starting ...") + + try: + tests.setup_environment(True) + except TimeoutError: + print("corosync did not start in time, exiting") + sys.exit(ExitStatus.TIMEOUT) + + if opts.run_only_pattern: + tests.run_tests_matching(opts.run_only_pattern) + tests.print_results() + elif opts.run_only: + tests.run_single(opts.run_only) + tests.print_results() + else: + tests.run_tests() + tests.print_results() + + tests.cleanup_environment(True) + + tests.exit() + + +if __name__ == "__main__": + main() diff --git a/cts/cts-exec.in b/cts/cts-exec.in index 539c54cc17..a89eb0ed53 100644 --- a/cts/cts-exec.in +++ b/cts/cts-exec.in @@ -1,1008 +1,963 @@ #!@PYTHON@ """ Regression tests for Pacemaker's pacemaker-execd """ __copyright__ = "Copyright 2012-2023 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" +import argparse import io import os import psutil import stat import sys import subprocess import shutil import tempfile import time # Where to find test binaries # Prefer the source tree if available TEST_DIR = sys.path[0] # These imports allow running from a source checkout after running `make`. # Note that this doesn't necessarily mean the tests will run successfully, # but being able to see --help output can be useful.
if os.path.exists("@abs_top_srcdir@/python"): sys.path.insert(0, "@abs_top_srcdir@/python") if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@": sys.path.insert(0, "@abs_top_builddir@/python") from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError from pacemaker._cts.process import killall, exit_if_proc_running, pipe_communicate, stdout_from_command from pacemaker._cts.test import Test, Tests # File permissions for executable scripts we create EXECMODE = stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH def update_path(): """ Set the PATH environment variable appropriately for the tests """ new_path = os.environ['PATH'] if os.path.exists("%s/cts-exec.in" % TEST_DIR): print("Running tests from the source tree: %s (%s)" % (BuildOptions._BUILD_DIR, TEST_DIR)) # For pacemaker-execd, cts-exec-helper, and pacemaker-remoted new_path = "%s/daemons/execd:%s" % (BuildOptions._BUILD_DIR, new_path) new_path = "%s/tools:%s" % (BuildOptions._BUILD_DIR, new_path) # For crm_resource # For pacemaker-fenced new_path = "%s/daemons/fenced:%s" % (BuildOptions._BUILD_DIR, new_path) # For cts-support new_path = "%s/cts/support:%s" % (BuildOptions._BUILD_DIR, new_path) else: print("Running tests from the install tree: %s (not %s)" % (BuildOptions.DAEMON_DIR, TEST_DIR)) # For cts-exec-helper, cts-support, pacemaker-execd, pacemaker-fenced, # and pacemaker-remoted new_path = "%s:%s" % (BuildOptions.DAEMON_DIR, new_path) - print('Using PATH="{}"'.format(new_path)) + print('Using PATH="%s"' % new_path) os.environ['PATH'] = new_path class ExecTest(Test): """ Executor for a single pacemaker-execd regression test """ def __init__(self, name, description, **kwargs): Test.__init__(self, name, description, **kwargs) self.tls = kwargs.get("tls", False) if self.tls: self._daemon_location = "pacemaker-remoted" else: self._daemon_location = "pacemaker-execd" - self._execd_process = None self._test_tool_location = "cts-exec-helper" + # We additionally need to keep track of a stonith process. 
+ self._stonith_process = None + def _new_cmd(self, cmd, args, exitcode, **kwargs): """ Add a command to be executed as part of this test """ if self.verbose and cmd == self._test_tool_location: args += " -V " if (cmd == self._test_tool_location) and self.tls: args += " -S " kwargs["validate"] = False kwargs["check_rng"] = False kwargs["check_stderr"] = False Test._new_cmd(self, cmd, args, exitcode, **kwargs) def _kill_daemons(self): killall([ "pacemaker-fenced", "lt-pacemaker-fenced", "pacemaker-execd", "lt-pacemaker-execd", "cts-exec-helper", "lt-cts-exec-helper", "pacemaker-remoted", ]) def _start_daemons(self): if not self.tls: self._stonith_process = subprocess.Popen(["pacemaker-fenced", "-s"]) cmd = [self._daemon_location, "-l", self.logpath] if self.verbose: cmd += ["-V"] - self._execd_process = subprocess.Popen(cmd) + self._daemon_process = subprocess.Popen(cmd) def clean_environment(self): """ Clean up the host after running a test """ - if self._execd_process: - self._execd_process.terminate() - self._execd_process.wait() + if self._daemon_process: + self._daemon_process.terminate() + self._daemon_process.wait() if self.verbose: print("Daemon Output Start") logfile = io.open(self.logpath, 'rt', errors='replace') for line in logfile: print(line.strip().encode('utf-8', 'replace')) print("Daemon Output End") if self._stonith_process: self._stonith_process.terminate() self._stonith_process.wait() - self._execd_process = None + self._daemon_process = None self._stonith_process = None def add_cmd(self, args): """ Add a cts-exec-helper command to be executed as part of this test """ self._new_cmd(self._test_tool_location, args, ExitStatus.OK) def add_cmd_and_kill(self, args, kill_proc): """ Add a cts-exec-helper command and system command to be executed as part of this test """ self._new_cmd(self._test_tool_location, args, ExitStatus.OK, kill=kill_proc) def add_cmd_check_stdout(self, args, match, no_match=None): """ Add a command with expected output to be executed as part of this test """ self._new_cmd(self._test_tool_location, args, ExitStatus.OK, stdout_match=match, stdout_negative_match=no_match) def add_cmd_expected_fail(self, args, exitcode=ExitStatus.ERROR): """ Add a cts-exec-helper command to be executed as part of this test and expected to fail """ self._new_cmd(self._test_tool_location, args, exitcode) def add_sys_cmd(self, cmd, args): """ Add a simple command to be executed as part of this test """ self._new_cmd(cmd, args, ExitStatus.OK) def run(self): """ Execute this test. 
""" if self.tls and self.name.count("stonith") != 0: self._result_txt = "SKIPPED - '%s' - disabled when testing pacemaker_remote" % (self.name) print(self._result_txt) return Test.run(self) class ExecTests(Tests): """ Collection of all pacemaker-execd regression tests """ def __init__(self, **kwargs): Tests.__init__(self, **kwargs) self.tls = kwargs.get("tls", False) self._action_timeout = " -t 9000 " self._installed_files = [] self._rsc_classes = self._setup_rsc_classes() print("Testing resource classes", repr(self._rsc_classes)) self._common_cmds = { "ocf_reg_line" : "-c register_rsc -r ocf_test_rsc "+self._action_timeout+" -C ocf -P pacemaker -T Dummy", "ocf_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_unreg_line" : "-c unregister_rsc -r \"ocf_test_rsc\" "+self._action_timeout, "ocf_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_start_line" : "-c exec -r \"ocf_test_rsc\" -a \"start\" "+self._action_timeout, "ocf_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete\" ", "ocf_stop_line" : "-c exec -r \"ocf_test_rsc\" -a \"stop\" "+self._action_timeout, "ocf_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete\" ", "ocf_monitor_line" : '-c exec -r ocf_test_rsc -a monitor -i 2s ' + self._action_timeout, "ocf_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, "ocf_cancel_line" : '-c cancel -r ocf_test_rsc -a monitor -i 2s ' + self._action_timeout, "ocf_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "systemd_reg_line" : "-c register_rsc -r systemd_test_rsc " + self._action_timeout + " -C systemd -T pacemaker-cts-dummyd@3", "systemd_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_unreg_line" : "-c unregister_rsc -r \"systemd_test_rsc\" "+self._action_timeout, "systemd_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_start_line" : "-c exec -r \"systemd_test_rsc\" -a \"start\" "+self._action_timeout, "systemd_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete\" ", "systemd_stop_line" : "-c exec -r \"systemd_test_rsc\" -a \"stop\" "+self._action_timeout, "systemd_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete\" ", "systemd_monitor_line" : '-c exec -r systemd_test_rsc -a monitor -i 2s ' + self._action_timeout, "systemd_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete\" -t 15000 ", "systemd_cancel_line" : '-c cancel -r systemd_test_rsc -a monitor -i 2s ' + self._action_timeout, "systemd_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "upstart_reg_line" : "-c register_rsc -r upstart_test_rsc "+self._action_timeout+" -C upstart -T pacemaker-cts-dummyd", "upstart_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_unreg_line" : "-c unregister_rsc -r \"upstart_test_rsc\" 
"+self._action_timeout, "upstart_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_start_line" : "-c exec -r \"upstart_test_rsc\" -a \"start\" "+self._action_timeout, "upstart_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:start rc:ok op_status:complete\" ", "upstart_stop_line" : "-c exec -r \"upstart_test_rsc\" -a \"stop\" "+self._action_timeout, "upstart_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete\" ", "upstart_monitor_line" : '-c exec -r upstart_test_rsc -a monitor -i 2s ' + self._action_timeout, "upstart_monitor_event" : '-l "NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete" -t 15000', "upstart_cancel_line" : '-c cancel -r upstart_test_rsc -a monitor -i 2s ' + self._action_timeout, "upstart_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "service_reg_line" : "-c register_rsc -r service_test_rsc "+self._action_timeout+" -C service -T LSBDummy", "service_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_unreg_line" : "-c unregister_rsc -r \"service_test_rsc\" "+self._action_timeout, "service_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_start_line" : "-c exec -r \"service_test_rsc\" -a \"start\" "+self._action_timeout, "service_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete\" ", "service_stop_line" : "-c exec -r \"service_test_rsc\" -a \"stop\" "+self._action_timeout, "service_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete\" ", "service_monitor_line" : '-c exec -r service_test_rsc -a monitor -i 2s ' + self._action_timeout, "service_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, "service_cancel_line" : '-c cancel -r service_test_rsc -a monitor -i 2s ' + self._action_timeout, "service_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "lsb_reg_line" : "-c register_rsc -r lsb_test_rsc "+self._action_timeout+" -C lsb -T LSBDummy", "lsb_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\" ", "lsb_unreg_line" : "-c unregister_rsc -r \"lsb_test_rsc\" "+self._action_timeout, "lsb_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\"", "lsb_start_line" : "-c exec -r \"lsb_test_rsc\" -a \"start\" "+self._action_timeout, "lsb_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete\" ", "lsb_stop_line" : "-c exec -r \"lsb_test_rsc\" -a \"stop\" "+self._action_timeout, "lsb_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete\" ", "lsb_monitor_line" : '-c exec -r lsb_test_rsc -a status -i 2s ' + self._action_timeout, "lsb_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete\" "+self._action_timeout, "lsb_cancel_line" : '-c cancel -r 
lsb_test_rsc -a status -i 2s ' + self._action_timeout, "lsb_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled\" ", "stonith_reg_line" : "-c register_rsc -r stonith_test_rsc " + self._action_timeout + " -C stonith -P pacemaker -T fence_dummy", "stonith_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\" ", "stonith_unreg_line" : "-c unregister_rsc -r \"stonith_test_rsc\" "+self._action_timeout, "stonith_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\"", "stonith_start_line" : '-c exec -r stonith_test_rsc -a start ' + self._action_timeout, "stonith_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete\" ", "stonith_stop_line" : "-c exec -r \"stonith_test_rsc\" -a \"stop\" "+self._action_timeout, "stonith_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete\" ", "stonith_monitor_line" : '-c exec -r stonith_test_rsc -a monitor -i 2s ' + self._action_timeout, "stonith_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, "stonith_cancel_line" : '-c cancel -r stonith_test_rsc -a monitor -i 2s ' + self._action_timeout, "stonith_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled\" ", } def _setup_rsc_classes(self): classes = stdout_from_command(["crm_resource", "--list-standards"]) classes = classes[:-1] # Strip trailing empty line if self.tls: classes.remove("stonith") if "nagios" in classes: classes.remove("nagios") if "systemd" in classes: try: # This code doesn't need this import, but pacemaker-cts-dummyd # does, so ensure the dependency is available rather than cause # all systemd tests to fail. import systemd.daemon except ImportError: print("Python systemd bindings not found.") print("The tests for systemd class are not going to be run.") classes.remove("systemd") return classes def new_test(self, name, description): """ Create a named test """ test = ExecTest(name, description, verbose=self.verbose, tls=self.tls, timeout=self.timeout, force_wait=self.force_wait, logdir=self.logdir) self._tests.append(test) return test def setup_environment(self): """ Prepare the host before executing any tests """ if BuildOptions.REMOTE_ENABLED: os.system("service pacemaker_remote stop") self.cleanup_environment() if self.tls and not os.path.isfile("/etc/pacemaker/authkey"): print("Installing /etc/pacemaker/authkey ...") os.system("mkdir -p /etc/pacemaker") os.system("dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1") self._installed_files.append("/etc/pacemaker/authkey") # If we're in build directory, install agents if not already installed if os.path.exists("%s/cts/cts-exec.in" % BuildOptions._BUILD_DIR): if not os.path.exists("%s/pacemaker" % BuildOptions.OCF_RA_INSTALL_DIR): # @TODO remember which components were created and remove them os.makedirs("%s/pacemaker" % BuildOptions.OCF_RA_INSTALL_DIR, 0o755) for agent in ["Dummy", "Stateful", "ping"]: agent_source = "%s/extra/resources/%s" % (BuildOptions._BUILD_DIR, agent) agent_dest = "%s/pacemaker/%s" % (BuildOptions.OCF_RA_INSTALL_DIR, agent) if not os.path.exists(agent_dest): print("Installing %s ..." 
% (agent_dest)) shutil.copyfile(agent_source, agent_dest) os.chmod(agent_dest, EXECMODE) self._installed_files.append(agent_dest) subprocess.call(["cts-support", "install"]) def cleanup_environment(self): """ Clean up the host after executing desired tests """ for installed_file in self._installed_files: print("Removing %s ..." % (installed_file)) os.remove(installed_file) subprocess.call(["cts-support", "uninstall"]) def build_generic_tests(self): """ Register tests that apply to all resource classes """ common_cmds = self._common_cmds ### register/unregister tests ### for rsc in self._rsc_classes: test = self.new_test("generic_registration_%s" % (rsc), "Simple resource registration test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### start/stop tests ### for rsc in self._rsc_classes: test = self.new_test("generic_start_stop_%s" % (rsc), "Simple start and stop test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor cancel test ### for rsc in self._rsc_classes: test = self.new_test("generic_monitor_cancel_%s" % (rsc), "Simple monitor cancel test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_cmd_expected_fail(common_cmds["%s_monitor_event" % (rsc)], ExitStatus.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. 
### test.add_cmd_expected_fail(common_cmds["%s_monitor_event" % (rsc)], ExitStatus.TIMEOUT) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor duplicate test ### for rsc in self._rsc_classes: test = self.new_test("generic_monitor_duplicate_%s" % (rsc), "Test creation and canceling of duplicate monitors for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) # Add the duplicate monitors test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) # verify we still get update events ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) # cancel the monitor, if the duplicate merged with the original, we should no longer see monitor updates test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_cmd_expected_fail(common_cmds["%s_monitor_event" % (rsc)], ExitStatus.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. ### test.add_cmd_expected_fail(common_cmds["%s_monitor_event" % (rsc)], ExitStatus.TIMEOUT) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### stop implies cancel test ### for rsc in self._rsc_classes: test = self.new_test("generic_stop_implies_cancel_%s" % (rsc), "Verify stopping a resource implies cancel of recurring ops for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_cmd_expected_fail(common_cmds["%s_monitor_event" % (rsc)], ExitStatus.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. 
### test.add_cmd_expected_fail(common_cmds["%s_monitor_event" % (rsc)], ExitStatus.TIMEOUT) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) def build_multi_rsc_tests(self): """ Register complex tests that involve managing multiple resources of different types """ common_cmds = self._common_cmds # Do not use service and systemd at the same time; they are the same resource. ### register start monitor stop unregister resources of each type at the same time. ### test = self.new_test("multi_rsc_start_stop_all", "Start, monitor, and stop resources of multiple types and classes") for rsc in self._rsc_classes: test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) for rsc in self._rsc_classes: test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) for rsc in self._rsc_classes: test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) for rsc in self._rsc_classes: ### If this fails, that means the monitor is not being rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) for rsc in self._rsc_classes: test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) for rsc in self._rsc_classes: test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) for rsc in self._rsc_classes: test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) def build_negative_tests(self): """ Register tests related to how pacemaker-execd handles failures """ ### ocf start timeout test ### test = self.new_test("ocf_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" " + self._action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") # -t must be less than self._action_timeout test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -k \"op_sleep\" -v \"5\" -t 1000 -w") test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:Timed Out" ' + self._action_timeout) test.add_cmd("-c exec -r test_rsc -a stop " + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self._action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### stonith start timeout test ### test = self.new_test("stonith_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd('-c register_rsc -r test_rsc ' + '-C stonith -P pacemaker -T fence_dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete"') test.add_cmd('-c exec -r test_rsc -a start -k monitor_delay -v 30 ' + '-t 1000 -w') # -t must be less than self._action_timeout test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:Timed Out" ' + self._action_timeout) test.add_cmd("-c exec -r test_rsc -a stop " + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self._action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### stonith component
fail ### common_cmds = self._common_cmds test = self.new_test("stonith_component_fail", "Kill stonith component after pacemaker-execd connects") test.add_cmd(common_cmds["stonith_reg_line"] + " " + common_cmds["stonith_reg_event"]) test.add_cmd(common_cmds["stonith_start_line"] + " " + common_cmds["stonith_start_event"]) test.add_cmd('-c exec -r stonith_test_rsc -a monitor -i 600s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd_and_kill('-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:error op_status:error" -t 15000', "killall -9 -q pacemaker-fenced lt-pacemaker-fenced") test.add_cmd(common_cmds["stonith_unreg_line"] + " " + common_cmds["stonith_unreg_event"]) ### monitor fail for ocf resources ### test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" " + self._action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" " + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" " + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self._action_timeout) test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self._action_timeout) test.add_cmd_and_kill('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self._action_timeout, "rm -f %s/run/Dummy-test_rsc.state" % BuildOptions.LOCAL_STATE_DIR) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s ' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" " + self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" " + self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" " + self._action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### verify notify changes only for monitor operation. 
### test = self.new_test("monitor_changes_only", "Verify when flag is set, only monitor changes are notified.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+" -o " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + ' -o -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd_and_kill('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self._action_timeout, 'rm -f %s/run/Dummy-test_rsc.state' % BuildOptions.LOCAL_STATE_DIR) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd('-c unregister_rsc -r "test_rsc" ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete"') ### monitor fail for systemd resource ### if "systemd" in self._rsc_classes: test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported..") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T pacemaker-cts-dummyd@3 " + self._action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd_and_kill('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self._action_timeout, "pkill -9 -f pacemaker-cts-dummyd") test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) 
test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### monitor fail for upstart resource ### if "upstart" in self._rsc_classes: test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported..") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T pacemaker-cts-dummyd "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd_and_kill('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self._action_timeout, 'killall -9 -q dd') test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Cancel non-existent operation on a resource ### test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout) ### interval is wrong, should fail test.add_cmd_expected_fail('-c cancel -r test_rsc -a monitor -i 2s' + 
self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") ### action name is wrong, should fail test.add_cmd_expected_fail('-c cancel -r test_rsc -a stop -i 1s' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" " + self._action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Attempt to invoke non-existent rsc id ### test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.") test.add_cmd_expected_fail("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:complete\" ") test.add_cmd_expected_fail("-c exec -r test_rsc -a stop "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd_expected_fail('-c exec -r test_rsc -a monitor -i 6s ' + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd_expected_fail("-c cancel -r test_rsc -a start "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, systemd ### if "systemd" in self._rsc_classes: test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T this_is_fake1234 "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") if "upstart" in self._rsc_classes: test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T this_is_fake1234 "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, ocf ### test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pacemaker -T this_is_fake1234 "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" 
"+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with non-existent provider ### test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pancakes -T Dummy "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with empty provider field ### test = self.new_test("start_ocf_no_provider", "Register ocf agent with a no provider, verify expected failure.") test.add_cmd_expected_fail("-c register_rsc -r \"test_rsc\" -C ocf -T Dummy "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd_expected_fail("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") def build_stress_tests(self): """ Register stress tests """ timeout = "-t 20000" iterations = 25 test = self.new_test("ocf_stress", "Verify OCF agent handling works under load") for i in range(iterations): test.add_cmd("-c register_rsc -r rsc_%s %s -C ocf -P heartbeat -T Dummy -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd('-c exec -r rsc_%s -a monitor %s -i 1s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete"' % (i, timeout, i)) for i in range(iterations): test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) if "systemd" in self._rsc_classes: test = self.new_test("systemd_stress", "Verify systemd dbus connection works under load") for i in range(iterations): test.add_cmd("-c register_rsc -r rsc_%s %s -C systemd -T pacemaker-cts-dummyd@3 -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd('-c exec -r rsc_%s -a monitor %s -i 1s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete"' % (i, timeout, i)) for i in range(iterations): test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT 
event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) iterations = 9 timeout = "-t 30000" ### Verify recurring op in-flight collision is handled in series properly test = self.new_test("rsc_inflight_collision", "Verify recurring ops do not collide with other operations for the same rsc.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd("-c exec -r test_rsc -a start %s -k op_sleep -v 1 -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\"" % (timeout)) for i in range(iterations): test.add_cmd('-c exec -r test_rsc -a monitor %s -i 100%dms ' '-k op_sleep -v 2 ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' % (timeout, i)) test.add_cmd("-c exec -r test_rsc -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\"" % (timeout)) test.add_cmd("-c unregister_rsc -r test_rsc %s -l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\"" % (timeout)) def build_custom_tests(self): """ Register tests that target specific cases """ ### verify resource temporary folder is created and used by OCF agents. ### test = self.new_test("rsc_tmp_dir", "Verify creation and use of rsc temporary state directory") test.add_sys_cmd("ls", "-al %s" % BuildOptions.RSC_TMP_DIR) test.add_cmd("-c register_rsc -r test_rsc -P heartbeat -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd("-c exec -r test_rsc -a start -t 4000") test.add_sys_cmd("ls", "-al %s" % BuildOptions.RSC_TMP_DIR) test.add_sys_cmd("ls", "%s/Dummy-test_rsc.state" % BuildOptions.RSC_TMP_DIR) test.add_cmd("-c exec -r test_rsc -a stop -t 4000") test.add_cmd("-c unregister_rsc -r test_rsc "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay then stop test ### test = self.new_test("start_delay", "Verify start delay works as expected.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd("-c exec -r test_rsc -s 6000 -a start -w -t 6000") test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 2000", ExitStatus.TIMEOUT) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 6000") test.add_cmd("-c exec -r test_rsc -a stop " + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self._action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay, but cancel before it gets a chance to start. 
### test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self._action_timeout) test.add_cmd("-c exec -r test_rsc -s 5000 -a start -w -t 4000") test.add_cmd("-c cancel -r test_rsc -a start " + self._action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 5000", ExitStatus.TIMEOUT) test.add_cmd("-c unregister_rsc -r test_rsc " + self._action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register a bunch of resources, verify we can get info on them ### test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.") if "systemd" in self._rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C systemd -T pacemaker-cts-dummyd@3 "+self._action_timeout) test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 "+self._action_timeout) test.add_cmd_expected_fail("-c get_rsc_info -r rsc1 ") if "upstart" in self._rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C upstart -T pacemaker-cts-dummyd "+self._action_timeout) test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 "+self._action_timeout) test.add_cmd_expected_fail("-c get_rsc_info -r rsc1 ") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self._action_timeout) test.add_cmd("-c get_rsc_info -r rsc2 ") test.add_cmd("-c unregister_rsc -r rsc2 "+self._action_timeout) test.add_cmd_expected_fail("-c get_rsc_info -r rsc2 ") ### Register duplicate, verify only one entry exists and can still be removed. test = self.new_test("duplicate_registration", "Register resource multiple times, verify only one entry exists and can be removed.") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self._action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self._action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Stateful -P pacemaker "+self._action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Stateful") test.add_cmd("-c unregister_rsc -r rsc2 "+self._action_timeout) test.add_cmd_expected_fail("-c get_rsc_info -r rsc2 ") ### verify the option to only send notification to the original client. 
### test = self.new_test("notify_orig_client_only", "Verify option to only send notifications to the client originating the action.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r \"test_rsc\" -a \"monitor\" -i 1s ' + self._action_timeout + ' -n ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') # this will fail because the monitor notifications should only go to the original caller, which no longer exists. test.add_cmd_expected_fail("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self._action_timeout, ExitStatus.TIMEOUT) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s -t 6000 ') test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self._action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### get metadata ### test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Dummy\"", "resource-agent name=\"Dummy\"") test.add_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Stateful\"") test.add_cmd_expected_fail("-c metadata -P \"pacemaker\" -T \"Stateful\"") test.add_cmd_expected_fail("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"fake_agent\"") ### get metadata ### test = self.new_test("get_lsb_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"lsb\" -T \"LSBDummy\"", "resource-agent name='LSBDummy'") ### get stonith metadata ### test = self.new_test("get_stonith_metadata", "Retrieve stonith metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy\"", "resource-agent name=\"fence_dummy\"") ### get metadata ### if "systemd" in self._rsc_classes: test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"systemd\" -T \"pacemaker-cts-dummyd@\"", "resource-agent name=\"pacemaker-cts-dummyd@\"") ### get metadata ### if "upstart" in self._rsc_classes: test = self.new_test("get_upstart_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"upstart\" -T \"pacemaker-cts-dummyd\"", "resource-agent name=\"pacemaker-cts-dummyd\"") ### get ocf providers ### test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.") test.add_cmd_check_stdout("-c list_ocf_providers ", "pacemaker") test.add_cmd_check_stdout("-c list_ocf_providers -T ping", "pacemaker") ### Verify agents only exist in their lists ### test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.") test.add_cmd_check_stdout("-c list_agents ", "Stateful") ### ocf ### test.add_cmd_check_stdout("-c list_agents -C ocf", "Stateful") test.add_cmd_check_stdout("-c list_agents -C lsb", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "LSBDummy") ### init.d ### test.add_cmd_check_stdout("-c list_agents -C lsb", "LSBDummy") 
test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C ocf", "", "pacemaker-cts-dummyd@") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "pacemaker-cts-dummyd@") ### should not exist test.add_cmd_check_stdout("-c list_agents -C lsb", "", "fence_dummy") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "fence_dummy") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "fence_dummy") ### should not exist if "systemd" in self._rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "pacemaker-cts-dummyd@") ### systemd ### test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C systemd", "pacemaker-cts-dummyd@") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "fence_dummy") ### should not exist if "upstart" in self._rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "pacemaker-cts-dummyd") ### upstart ### test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C upstart", "pacemaker-cts-dummyd") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "fence_dummy") ### should not exist if "stonith" in self._rsc_classes: test.add_cmd_check_stdout("-c list_agents -C stonith", "fence_dummy") ### stonith ### test.add_cmd_check_stdout("-c list_agents -C stonith", "", "pacemaker-cts-dummyd@") ### should not exist test.add_cmd_check_stdout("-c list_agents -C stonith", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "fence_dummy") -class TestOptions(object): - """ Option handler """ - - def __init__(self): - self.options = {} - self.options['list-tests'] = False - self.options['run-all'] = True - self.options['run-only'] = "" - self.options['run-only-pattern'] = "" - self.options['verbose'] = False - self.options['timeout'] = 2 - self.options['force-wait'] = False - self.options['invalid-arg'] = "" - self.options['show-usage'] = False - self.options['pacemaker-remote'] = False - - def build_options(self, argv): - """ Set options based on command-line arguments """ - - args = argv[1:] - skip = False - for i in range(0, len(args)): - if skip: - skip = False - continue - elif args[i] == "-h" or args[i] == "--help": - self.options['show-usage'] = True - elif args[i] == "-l" or args[i] == "--list-tests": - self.options['list-tests'] = True - elif args[i] == "-V" or args[i] == "--verbose": - self.options['verbose'] = True - elif args[i] == "-t" or args[i] == "--timeout": - self.options['timeout'] = float(args[i+1]) - elif args[i] == "-w" or args[i] == "--force-wait": - self.options['force-wait'] = True - elif args[i] == "-R" or args[i] == "--pacemaker-remote": - if BuildOptions.REMOTE_ENABLED: - self.options['pacemaker-remote'] = True - else: - print("ERROR: This build does not support Pacemaker Remote") - sys.exit(ExitStatus.USAGE) - elif args[i] == "-r" or args[i] == "--run-only": - self.options['run-only'] = args[i+1] - skip = True - elif args[i] == "-p" or args[i] == "--run-only-pattern": - self.options['run-only-pattern'] = args[i+1] - skip = True - - def show_usage(self): - """ Show command usage """ - - print("usage: " + sys.argv[0] + " [options]") - print("If no options are provided, all tests will run") - 
print("Options:") - print("\t [--help | -h] Show usage") - print("\t [--list-tests | -l] Print out all registered tests.") - print("\t [--run-only | -r 'testname'] Run a specific test") - print("\t [--verbose | -V] Verbose output") - print("\t [--timeout | -t 'floating point number']" - "\n\t\tUp to how many seconds each test case waits for the daemon to be initialized." - "\n\t\tDefaults to 2. The value 0 means no limit.") - print("\t [--force-wait | -w]" - "\n\t\tEach test case waits the default/specified --timeout for the daemon without tracking the log.") - if BuildOptions.REMOTE_ENABLED: - print("\t [--pacemaker-remote | -R Test pacemaker-remoted binary instead of pacemaker-execd") - print("\t [--run-only-pattern | -p 'string'] Run only tests containing the string value") - print("\n\tExample: Run only the test 'start_stop'") - print("\t\t " + sys.argv[0] + " --run-only start_stop") - print("\n\tExample: Run only the tests with the string 'systemd' present in them") - print("\t\t " + sys.argv[0] + " --run-only-pattern systemd") - - -def main(argv): +def build_options(): + parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, + description="Run pacemaker-execd regression tests", + epilog="Example: Run only the test 'start_stop'\n" + "\t " + sys.argv[0] + " --run-only start_stop\n\n" + "Example: Run only the tests with the string 'systemd' present in them\n" + "\t " + sys.argv[0] + " --run-only-pattern systemd") + parser.add_argument("-l", "--list-tests", action="store_true", + help="Print out all registered tests") + parser.add_argument("-p", "--run-only-pattern", metavar='PATTERN', + help="Run only tests matching the given pattern") + parser.add_argument("-r", "--run-only", metavar='TEST', + help="Run a specific test") + parser.add_argument("-t", "--timeout", type=float, default=2, + help="Up to how many seconds each test case waits for the daemon to " + "be initialized. Defaults to 2. 
The value 0 means no limit.") + parser.add_argument("-w", "--force-wait", action="store_true", + help="Each test case waits the default/specified --timeout for the " + "daemon without tracking the log") + if BuildOptions.REMOTE_ENABLED: + parser.add_argument("-R", "--pacemaker-remote", action="store_true", + help="Test pacemaker-remoted binary instead of pacemaker-execd") + parser.add_argument("-V", "--verbose", action="store_true", + help="Verbose output") + + args = parser.parse_args() + return args + + +def main(): """ Run pacemaker-execd regression tests as specified by arguments """ update_path() # Ensure all command output is in portable locale for comparison os.environ['LC_ALL'] = "C" - opts = TestOptions() - opts.build_options(argv) - - if opts.options['show-usage']: - opts.show_usage() - sys.exit(ExitStatus.OK) + opts = build_options() - if opts.options['pacemaker-remote']: + if opts.pacemaker_remote: daemon_name = "pacemaker-remoted" else: daemon_name = "pacemaker-execd" exit_if_proc_running(daemon_name) # Create a temporary directory for log files (the directory will # automatically be erased when done) with tempfile.TemporaryDirectory(prefix="cts-exec-") as logdir: - tests = ExecTests(verbose=opts.options['verbose'], tls=opts.options['pacemaker-remote'], - timeout=opts.options['timeout'], force_wait=opts.options['force-wait'], + tests = ExecTests(verbose=opts.verbose, tls=opts.pacemaker_remote, + timeout=opts.timeout, force_wait=opts.force_wait, logdir=logdir) tests.build_generic_tests() tests.build_multi_rsc_tests() tests.build_negative_tests() tests.build_custom_tests() tests.build_stress_tests() - if opts.options['list-tests']: + if opts.list_tests: tests.print_list() sys.exit(ExitStatus.OK) print("Starting ...") tests.setup_environment() - if opts.options['run-only-pattern'] != "": - tests.run_tests_matching(opts.options['run-only-pattern']) + if opts.run_only_pattern: + tests.run_tests_matching(opts.run_only_pattern) tests.print_results() - elif opts.options['run-only'] != "": - tests.run_single(opts.options['run-only']) + elif opts.run_only: + tests.run_single(opts.run_only) tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_environment() tests.exit() if __name__ == "__main__": - main(sys.argv) + main() diff --git a/cts/cts-fencing.in b/cts/cts-fencing.in index 7e485f8f69..3f1a22f2d1 100644 --- a/cts/cts-fencing.in +++ b/cts/cts-fencing.in @@ -1,1281 +1,1096 @@ #!@PYTHON@ """ Regression tests for Pacemaker's fencer """ __copyright__ = "Copyright 2012-2023 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" -import io +import argparse import os -import psutil import sys import subprocess -import time import tempfile -import signal # These imports allow running from a source checkout after running `make`. # Note that while this doesn't necessarily mean it will successfully run tests, # but being able to see --help output can be useful. 
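# Both trees are prepended to sys.path, so a source checkout takes precedence over any installed pacemaker Python package; the build tree matters for generated modules such as buildoptions.py.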
if os.path.exists("@abs_top_srcdir@/python"): sys.path.insert(0, "@abs_top_srcdir@/python") if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@": sys.path.insert(0, "@abs_top_builddir@/python") from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus +from pacemaker._cts.corosync import Corosync, localname from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError, XmlValidationError -from pacemaker._cts.process import killall, exit_if_proc_running, stdout_from_command +from pacemaker._cts.process import killall, exit_if_proc_running from pacemaker._cts.test import Test, Tests TEST_DIR = sys.path[0] -AUTOGEN_COROSYNC_TEMPLATE = """ -totem { - version: 2 - cluster_name: cts-fencing - crypto_cipher: none - crypto_hash: none - transport: udp -} - -nodelist { - node { - nodeid: 1 - name: %s - ring0_addr: 127.0.0.1 - } -} - -logging { - debug: off - to_syslog: no - to_stderr: no - to_logfile: yes - logfile: %s -} -""" - def update_path(): """ Set the PATH environment variable appropriately for the tests """ new_path = os.environ['PATH'] if os.path.exists("%s/cts-fencing.in" % TEST_DIR): print("Running tests from the source tree: %s (%s)" % (BuildOptions._BUILD_DIR, TEST_DIR)) # For pacemaker-fenced and cts-fence-helper new_path = "%s/daemons/fenced:%s" % (BuildOptions._BUILD_DIR, new_path) new_path = "%s/tools:%s" % (BuildOptions._BUILD_DIR, new_path) # For stonith_admin new_path = "%s/cts/support:%s" % (BuildOptions._BUILD_DIR, new_path) # For cts-support else: print("Running tests from the install tree: %s (not %s)" % (BuildOptions.DAEMON_DIR, TEST_DIR)) # For pacemaker-fenced, cts-fence-helper, and cts-support new_path = "%s:%s" % (BuildOptions.DAEMON_DIR, new_path) - print('Using PATH="{}"'.format(new_path)) + print('Using PATH="%s"' % new_path) os.environ['PATH'] = new_path -def localname(): - """ Return the uname of the local host """ - - our_uname = stdout_from_command(["uname", "-n"]) - if our_uname: - our_uname = our_uname[0] - else: - our_uname = "localhost" - return our_uname - - class FenceTest(Test): """ Executor for a single test """ def __init__(self, name, description, **kwargs): Test.__init__(self, name, description, **kwargs) if kwargs.get("with_cpg", False): self._enable_corosync = True - self._stonith_options = ["-c"] + self._daemon_options = ["-c"] else: self._enable_corosync = False - self._stonith_options = ["-s"] + self._daemon_options = ["-s"] self._daemon_location = "pacemaker-fenced" - self._negative_stonith_patterns = [] - self._stonith_output = "" - self._stonith_patterns = [] - - def _count_negative_matches(self, outline): - """ Return 1 if a line matches patterns that shouldn't have occurred """ - - count = 0 - for line in self._negative_stonith_patterns: - if outline.count(line): - count = 1 - if self.verbose: - print("This pattern should not have matched = '%s" % (line)) - return count def _kill_daemons(self): killall(["pacemakerd", "pacemaker-fenced"]) - def _match_patterns(self): - """ Check test output for expected patterns """ - - negative_matches = 0 - cur = 0 - pats = self._stonith_patterns - total_patterns = len(self._stonith_patterns) - - if len(self._stonith_patterns) == 0 and len(self._negative_stonith_patterns) == 0: - return - - for line in self._stonith_output.split("\n"): - negative_matches = negative_matches + self._count_negative_matches(line) - if len(pats) == 0: - continue - cur = -1 - for pat in pats: - cur = cur + 1 - if 
line.count(pats[cur]): - del pats[cur] - break - - if len(pats) > 0 or negative_matches: - if self.verbose: - for pat in pats: - print("Pattern Not Matched = '%s'" % pat) - - msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." - self._result_txt = msg % (self.name, len(pats), total_patterns, negative_matches) - self.exitcode = ExitStatus.ERROR - def _start_daemons(self): if self.verbose: - self._stonith_options += ["-V"] - print("Starting %s with %s" % (self._daemon_location, self._stonith_options)) - - cmd = ["pacemaker-fenced", "-l", self.logpath] + self._stonith_options - self._stonith_process = subprocess.Popen(cmd) + self._daemon_options += ["-V"] + print("Starting %s with %s" % (self._daemon_location, self._daemon_options)) - def clean_environment(self): - """ Clean up the host after executing a test """ - - if self._stonith_process: - if self._stonith_process.poll() == None: - self._stonith_process.terminate() - self._stonith_process.wait() - else: - return_code = { - getattr(signal, _signame): _signame - for _signame in dir(signal) - if _signame.startswith('SIG') and not _signame.startswith("SIG_") - }.get(-self._stonith_process.returncode, "RET=%d" % (self._stonith_process.returncode)) - msg = "FAILURE - '%s' failed. pacemaker-fenced abnormally exited during test (%s)." - self._result_txt = msg % (self.name, return_code) - self.exitcode = ExitStatus.ERROR - - self._stonith_output = "" - self._stonith_process = None - - # the default for utf-8 encoding would error out if e.g. memory corruption - # makes fenced output any kind of 8 bit value - while still interesting - # for debugging and we'd still like the regression-test to go over the - # full set of test-cases - logfile = io.open(self.logpath, 'rt', encoding = "ISO-8859-1") - for line in logfile.readlines(): - self._stonith_output += line - - if self.verbose: - print("Daemon Output Start") - print(self._stonith_output) - print("Daemon Output End") - - def add_stonith_log_pattern(self, pattern): - """ Add a log pattern to expect from this test """ - - self._stonith_patterns.append(pattern) - - def add_stonith_neg_log_pattern(self, pattern): - """ Add a log pattern that should not occur with this test """ - - self._negative_stonith_patterns.append(pattern) + cmd = ["pacemaker-fenced", "-l", self.logpath] + self._daemon_options + self._daemon_process = subprocess.Popen(cmd) class FenceTests(Tests): """ Collection of all fencing regression tests """ def __init__(self, **kwargs): - Tests.__init__(self, **kwargs) + Tests.__init__(self, **kwargs) - self._autogen_corosync_cfg = not os.path.exists(BuildOptions.COROSYNC_CONFIG_FILE) + self._corosync = Corosync(self.verbose, self.logdir, "cts-fencing") def new_test(self, name, description, with_cpg=False): """ Create a named test """ test = FenceTest(name, description, verbose=self.verbose, with_cpg=with_cpg, timeout=self.timeout, force_wait=self.force_wait, logdir=self.logdir) self._tests.append(test) return test - def start_corosync(self): - """ Start the corosync process """ - - if self.verbose: - print("Starting corosync") - - test = subprocess.Popen("corosync", stdout=subprocess.PIPE) - test.wait() - time.sleep(10) - def run_cpg_only(self): """ Run all corosync-enabled tests """ for test in self._tests: if test._enable_corosync: test.run() def run_no_cpg(self): """ Run all standalone tests """ for test in self._tests: if not test._enable_corosync: test.run() def build_api_sanity_tests(self): """ Register tests to verify basic API usage """ 
verbose_arg = "" if self.verbose: verbose_arg = "-V" test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.") test.add_cmd("cts-fence-helper", "-t %s" % (verbose_arg), validate=False) test = self.new_test("cpg_low_level_api_test", "Sanity test client api using mainloop and cpg.", True) test.add_cmd("cts-fence-helper", "-m %s" % (verbose_arg), validate=False) def build_custom_timeout_tests(self): """ Register tests to verify custom timeout usage """ # custom timeout without topology test = self.new_test("cpg_custom_timeout_1", "Verify per device timeouts work as expected without using topology.", True) test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') test.add_cmd('stonith_admin', '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4"') test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # timeout is 5+1+4 = 10 - test.add_stonith_log_pattern("Total timeout set to 12") + test.add_log_pattern("Total timeout set to 12") # custom timeout _WITH_ topology test = self.new_test("cpg_custom_timeout_2", "Verify per device timeouts work as expected _WITH_ topology.", True) test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') test.add_cmd('stonith_admin', '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4000"') test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # timeout is 5+1+4000 = 4006 - test.add_stonith_log_pattern("Total timeout set to 4807") + test.add_log_pattern("Total timeout set to 4807") def build_fence_merge_tests(self): """ Register tests to verify when fence operations should be merged """ ### Simple test that overlapping fencing operations get merged test = self.new_test("cpg_custom_merge_single", "Verify overlapping identical fencing operations are merged, no fencing levels used.", True) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") ### one merger will happen - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + 
test.add_log_pattern("Operation 'off' targeting node3 by ") ### Test that multiple mergers occur test = self.new_test("cpg_custom_merge_multiple", "Verify multiple overlapping identical fencing operations are merged", True) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"delay=2\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") ### 4 mergers should occur - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") ### Test that multiple mergers occur with topologies used test = self.new_test("cpg_custom_merge_with_topology", "Verify multiple overlapping identical fencing operations are merged with fencing levels.", True) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") 
test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") ### 4 mergers should occur - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") - test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") - test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") + test.add_log_pattern("Operation 'off' targeting node3 by ") def build_fence_no_merge_tests(self): """ Register tests to verify when fence operations should not be merged """ test = self.new_test("cpg_custom_no_merge", "Verify differing fencing operations are not merged", True) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3 node2\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node2 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") - test.add_stonith_neg_log_pattern("Merging fencing action 'off' targeting node3 originating from client") + test.add_log_pattern("Merging fencing action 'off' targeting node3 originating from client", + negative=True) def build_standalone_tests(self): """ Register a grab bag of tests that can be executed in standalone or corosync mode """ test_types = [ { "prefix" : "standalone", "use_cpg" : False, }, { "prefix" : "cpg", "use_cpg" : True, }, ] # test what happens when all devices timeout for test_type in test_types: test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"], "Verify that all devices timeout, a fencing failure is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o 
\"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") if test_type["use_cpg"]: test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -F node3 -t 2", ExitStatus.TIMEOUT) - test.add_stonith_log_pattern("Total timeout set to 7") + test.add_log_pattern("Total timeout set to 7") else: test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -F node3 -t 2", ExitStatus.ERROR) - test.add_stonith_log_pattern("targeting node3 using false1 returned ") - test.add_stonith_log_pattern("targeting node3 using false2 returned ") - test.add_stonith_log_pattern("targeting node3 using false3 returned ") + test.add_log_pattern("targeting node3 using false1 returned ") + test.add_log_pattern("targeting node3 using false2 returned ") + test.add_log_pattern("targeting node3 using false3 returned ") # test what happens when multiple devices can fence a node, but the first device fails. for test_type in test_types: test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"], "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") if test_type["use_cpg"]: - test.add_stonith_log_pattern("Total timeout set to 18") + test.add_log_pattern("Total timeout set to 18") # test what happens when we try to use a missing fence-agent. 
for test_type in test_types: test = self.new_test("%s_fence_missing_agent" % test_type["prefix"], "Verify proper error-handling when using a non-existent fence-agent.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_missing -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node2\"") test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -F node3 -t 5", ExitStatus.NOSUCH) test.add_cmd("stonith_admin", "--output-as=xml -F node2 -t 5") # simple topology test for one device for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_simple" % test_type["prefix"], "Verify all fencing devices at a level are used.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") - test.add_stonith_log_pattern("Total timeout set to 6") - test.add_stonith_log_pattern("targeting node3 using true returned 0") + test.add_log_pattern("Total timeout set to 6") + test.add_log_pattern("targeting node3 using true returned 0") # add topology, delete topology, verify fencing still works for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_add_remove" % test_type["prefix"], "Verify fencing occurrs after all topology levels are removed", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true") test.add_cmd("stonith_admin", "--output-as=xml -d node3 -i 1") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") - test.add_stonith_log_pattern("Total timeout set to 6") - test.add_stonith_log_pattern("targeting node3 using true returned 0") + test.add_log_pattern("Total timeout set to 6") + test.add_log_pattern("targeting node3 using true returned 0") # test what happens when the first fencing level has multiple devices. for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_device_fails" % test_type["prefix"], "Verify if one device in a level fails, the other is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R false -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 20") - test.add_stonith_log_pattern("Total timeout set to 48") - test.add_stonith_log_pattern("targeting node3 using false returned 1") - test.add_stonith_log_pattern("targeting node3 using true returned 0") + test.add_log_pattern("Total timeout set to 48") + test.add_log_pattern("targeting node3 using false returned 1") + test.add_log_pattern("targeting node3 using true returned 0") # test what happens when the first fencing level fails. 
for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], "Verify if one level fails, the next level is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 3") - test.add_stonith_log_pattern("Total timeout set to 21") - test.add_stonith_log_pattern("targeting node3 using false1 returned 1") - test.add_stonith_log_pattern("targeting node3 using false2 returned 1") - test.add_stonith_log_pattern("targeting node3 using true3 returned 0") - test.add_stonith_log_pattern("targeting node3 using true4 returned 0") + test.add_log_pattern("Total timeout set to 21") + test.add_log_pattern("targeting node3 using false1 returned 1") + test.add_log_pattern("targeting node3 using false2 returned 1") + test.add_log_pattern("targeting node3 using true3 returned 0") + test.add_log_pattern("targeting node3 using true4 returned 0") # test what happens when the first fencing level has devices that no one has registered for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_missing_devices" % test_type["prefix"], "Verify topology can continue with missing devices.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # Test what happens if multiple fencing levels are defined, and then
the first one is removed. for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_level_removal" % test_type["prefix"], "Verify level removal works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true4") # Now remove level 2, verify none of the devices in level two are hit. test.add_cmd("stonith_admin", "--output-as=xml -d node3 -i 2") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 20") - test.add_stonith_log_pattern("Total timeout set to 96") - test.add_stonith_log_pattern("targeting node3 using false1 returned 1") - test.add_stonith_neg_log_pattern("targeting node3 using false2 returned ") - test.add_stonith_log_pattern("targeting node3 using true3 returned 0") - test.add_stonith_log_pattern("targeting node3 using true4 returned 0") + test.add_log_pattern("Total timeout set to 96") + test.add_log_pattern("targeting node3 using false1 returned 1") + test.add_log_pattern("targeting node3 using false2 returned ", + negative=True) + test.add_log_pattern("targeting node3 using true3 returned 0") + test.add_log_pattern("targeting node3 using true4 returned 0") # Test targeting a topology level by node name pattern. 
for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_level_pattern" % test_type["prefix"], "Verify targeting topology by node name pattern works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", """--output-as=xml -R true -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node1 node2 node3" """) test.add_cmd("stonith_admin", """--output-as=xml -r '@node.*' -i 1 -v true""") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") - test.add_stonith_log_pattern("targeting node3 using true returned 0") + test.add_log_pattern("targeting node3 using true returned 0") # test allowing commas and semicolons as delimiters in pcmk_host_list for test_type in test_types: test = self.new_test("%s_host_list_delimiters" % test_type["prefix"], "Verify commas and semicolons can be used as pcmk_host_list delimiters", test_type["use_cpg"]) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node1,node2,node3" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=pcmk1;pcmk2;pcmk3" """) test.add_cmd("stonith_admin", "--output-as=xml -F node2 -t 5") test.add_cmd("stonith_admin", "--output-as=xml -F pcmk3 -t 5") - test.add_stonith_log_pattern("targeting node2 using true1 returned 0") - test.add_stonith_log_pattern("targeting pcmk3 using true2 returned 0") + test.add_log_pattern("targeting node2 using true1 returned 0") + test.add_log_pattern("targeting pcmk3 using true2 returned 0") # test that the fencer builds the correct list of devices that can fence a node. for test_type in test_types: test = self.new_test("%s_list_devices" % test_type["prefix"], "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l node1 -V", "true2", "true1") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l node1 -V", "true3", "true1") # simple test of device monitor for test_type in test_types: test = self.new_test("%s_monitor" % test_type["prefix"], "Verify device is reachable", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -Q true1") test.add_cmd("stonith_admin", "--output-as=xml -Q false1") test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -Q true2", ExitStatus.NOSUCH) # Verify monitor occurs for duration of timeout period on failure for test_type in test_types: test = self.new_test("%s_monitor_timeout" % test_type["prefix"], "Verify monitor uses duration of timeout period given.", test_type["use_cpg"]) test.add_cmd("stonith_admin", '--output-as=xml -R true1 -a fence_dummy -o "mode=fail" -o "monitor_mode=fail" -o "pcmk_host_list=node3"') test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -Q true1 -t 5", ExitStatus.ERROR) - test.add_stonith_log_pattern("Attempt 2 to execute") + test.add_log_pattern("Attempt 2 to execute")
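# Each test in this file follows the same harness pattern: register one or
# more fence_dummy devices, drive an action through stonith_admin, then
# assert on fencer log output. As a hypothetical minimal illustration (the
# test name is made up; the API calls are the ones used throughout this file):
#
#     test = self.new_test("cpg_minimal_example",
#                          "Illustrative skeleton of the harness pattern", True)
#     test.add_cmd("stonith_admin",
#                  "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"")
#     test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5")
#     test.add_log_pattern("targeting node3 using true1 returned 0")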
test.add_log_pattern("Attempt 2 to execute") # Verify monitor occurs for duration of timeout period on failure, but stops at max retries for test_type in test_types: test = self.new_test("%s_monitor_timeout_max_retries" % test_type["prefix"], "Verify monitor retries until max retry value or timeout is hit.", test_type["use_cpg"]) test.add_cmd("stonith_admin", '--output-as=xml -R true1 -a fence_dummy -o "mode=fail" -o "monitor_mode=fail" -o "pcmk_host_list=node3"') test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -Q true1 -t 15", ExitStatus.ERROR) - test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (list) the maximum number of times") + test.add_log_pattern("Attempted to execute agent fence_dummy (list) the maximum number of times") # simple register test for test_type in test_types: test = self.new_test("%s_register" % test_type["prefix"], "Verify devices can be registered and un-registered", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -Q true1") test.add_cmd("stonith_admin", "--output-as=xml -D true1") test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -Q true1", ExitStatus.NOSUCH) # simple reboot test for test_type in test_types: test = self.new_test("%s_reboot" % test_type["prefix"], "Verify devices can be rebooted", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -B node3 -t 5") test.add_cmd("stonith_admin", "--output-as=xml -D true1") test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -Q true1", ExitStatus.NOSUCH) # test fencing history. 
# test fencing history. for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_fence_history" % test_type["prefix"], "Verify last fencing operation is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5 -V") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -H node3", 'action="off" target="node3" .* status="success"') # simple test of dynamic list query for test_type in test_types: test = self.new_test("%s_dynamic_list_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l fake_port_1", 'count="3"') # fence using dynamic list query for test_type in test_types: test = self.new_test("%s_fence_dynamic_list_query" % test_type["prefix"], "Verify fencing works with a dynamically retrieved device list.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -F fake_port_1 -t 5 -V") # simple test of query using status action for test_type in test_types: test = self.new_test("%s_status_query" % test_type["prefix"], "Verify list of fencing devices can be retrieved using the status action.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l fake_port_1", 'count="3"') # test what happens when no reboot action is advertised for test_type in test_types: test = self.new_test("%s_no_reboot_support" % test_type["prefix"], "Verify reboot action defaults to off when no reboot action is advertised by agent.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy_no_reboot -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -B node1 -t 5 -V") - test.add_stonith_log_pattern("does not support reboot") - test.add_stonith_log_pattern("using true1 returned 0") + test.add_log_pattern("does not support reboot") + test.add_log_pattern("using true1 returned 0") # make sure reboot is used when reboot action is advertised for test_type in test_types: test = self.new_test("%s_with_reboot_support" % test_type["prefix"], "Verify reboot action can be used when metadata advertises it.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"")
test.add_cmd("stonith_admin", "--output-as=xml -B node1 -t 5 -V") - test.add_stonith_neg_log_pattern("does not advertise support for 'reboot', performing 'off'") - test.add_stonith_log_pattern("using true1 returned 0") + test.add_log_pattern("does not advertise support for 'reboot', performing 'off'", + negative=True) + test.add_log_pattern("using true1 returned 0") # make sure requested fencing delay is applied only for the first device in the first level # make sure static delay from pcmk_delay_base is added for test_type in test_types: if not test_type["use_cpg"]: continue test = self.new_test("%s_topology_delay" % test_type["prefix"], "Verify requested fencing delay is applied only for the first device in the first level and pcmk_delay_base is added.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\" -o \"pcmk_delay_base=1\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\" -o \"pcmk_delay_base=1\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -F node3 --delay 1") - test.add_stonith_log_pattern("Delaying 'off' action targeting node3 using true1 for 2s | timeout=120s requested_delay=1s base=1s max=1s") - test.add_stonith_log_pattern("Delaying 'off' action targeting node3 using false1 for 1s | timeout=120s requested_delay=0s base=1s max=1s") - test.add_stonith_neg_log_pattern("Delaying 'off' action targeting node3 using true2") - test.add_stonith_neg_log_pattern("Delaying 'off' action targeting node3 using true3") + test.add_log_pattern("Delaying 'off' action targeting node3 using true1 for 2s | timeout=120s requested_delay=1s base=1s max=1s") + test.add_log_pattern("Delaying 'off' action targeting node3 using false1 for 1s | timeout=120s requested_delay=0s base=1s max=1s") + test.add_log_pattern("Delaying 'off' action targeting node3 using true2", + negative=True) + test.add_log_pattern("Delaying 'off' action targeting node3 using true3", + negative=True) def build_nodeid_tests(self): """ Register tests that use a corosync node id """ our_uname = localname() ### verify nodeid is supplied when nodeid is in the metadata parameters test = self.new_test("cpg_supply_nodeid", "Verify nodeid is given when fence agent has nodeid as parameter", True) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -F %s -t 3" % (our_uname)) - test.add_stonith_log_pattern("as nodeid with fence action 'off' targeting %s" % (our_uname)) + test.add_log_pattern("as nodeid with fence action 'off' targeting %s" % (our_uname)) ### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters test = self.new_test("cpg_do_not_supply_nodeid", "Verify nodeid is _NOT_ given when fence agent does not have nodeid as parameter", True) # use a host name that won't be 
in corosync.conf test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=regr-test\"") test.add_cmd("stonith_admin", "--output-as=xml -F regr-test -t 3") - test.add_stonith_neg_log_pattern("as nodeid with fence action 'off' targeting regr-test") + test.add_log_pattern("as nodeid with fence action 'off' targeting regr-test", + negative=True) ### verify nodeid use doesn't explode standalone mode test = self.new_test("standalone_do_not_supply_nodeid", "Verify nodeid in metadata parameter list doesn't kill standalone mode", False) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -F %s -t 3" % (our_uname)) - test.add_stonith_neg_log_pattern("as nodeid with fence action 'off' targeting %s" % (our_uname)) + test.add_log_pattern("as nodeid with fence action 'off' targeting %s" % our_uname, + negative=True) def build_unfence_tests(self): """ Register tests that verify unfencing """ our_uname = localname() ### verify unfencing using automatic unfencing test = self.new_test("cpg_unfence_required_1", "Verify unfencing is required on all devices when automatic=true in agent's metadata", True) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) # both devices should be executed - test.add_stonith_log_pattern("using true1 returned 0") - test.add_stonith_log_pattern("using true2 returned 0") + test.add_log_pattern("using true1 returned 0") + test.add_log_pattern("using true2 returned 0") ### verify unfencing using automatic unfencing fails if any of the required agents fail test = self.new_test("cpg_unfence_required_2", "Verify unfencing fails if any required device fails when automatic=true in agent's metadata", True) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=fail" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -U %s -t 6" % (our_uname), ExitStatus.ERROR) ### verify unfencing using automatic devices with topology test = self.new_test("cpg_unfence_required_3", "Verify unfencing is required on all devices even when at different topology levels", True) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) - test.add_stonith_log_pattern("using true1 returned 0") - test.add_stonith_log_pattern("using true2 returned 0") + test.add_log_pattern("using true1 returned 0") + test.add_log_pattern("using true2 returned 0") ### verify unfencing using automatic devices with topology test = self.new_test("cpg_unfence_required_4", "Verify all required devices are
executed even when topology levels fail.", True) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true3 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true4 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false3 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false4 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v false1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v false2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v false3" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true3" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 3 -v false4" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 4 -v true4" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) - test.add_stonith_log_pattern("using true1 returned 0") - test.add_stonith_log_pattern("using true2 returned 0") - test.add_stonith_log_pattern("using true3 returned 0") - test.add_stonith_log_pattern("using true4 returned 0") + test.add_log_pattern("using true1 returned 0") + test.add_log_pattern("using true2 returned 0") + test.add_log_pattern("using true3 returned 0") + test.add_log_pattern("using true4 returned 0") def build_unfence_on_target_tests(self): """ Register tests that verify unfencing that runs on the target """ our_uname = localname() ### verify unfencing using on_target device test = self.new_test("cpg_unfence_on_target_1", "Verify unfencing with on_target = true", True) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) - test.add_stonith_log_pattern("(on) to be executed on target") + test.add_log_pattern("(on) to be executed on target") ### verify failure of unfencing using on_target device test = self.new_test("cpg_unfence_on_target_2", "Verify failure unfencing with on_target = true", True) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake_1234\"" % (our_uname)) test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -U node_fake_1234 -t 3", ExitStatus.NOSUCH) - test.add_stonith_log_pattern("(on) to be executed on target") + test.add_log_pattern("(on) to be executed on target")
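# Unfencing ("-U") runs the agent's "on" action. When the agent metadata
# marks "on" as an on_target action, the fencer requires it to be executed
# on the node being unfenced, which is what the "(on) to be executed on
# target" pattern above asserts. A hypothetical two-level registration for
# a target (device names are illustrative):
#
#     stonith_admin --output-as=xml -r node3 -i 1 -v dev1   # level 1: dev1
#     stonith_admin --output-as=xml -r node3 -i 2 -v dev2   # level 2: dev2
#     stonith_admin --output-as=xml -U node3 -t 3           # unfence node3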
"Verify unfencing with on_target = true using topology", True) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) - test.add_stonith_log_pattern("(on) to be executed on target") + test.add_log_pattern("(on) to be executed on target") ### verify unfencing using on_target device with topology fails when target node doesn't exist test = self.new_test("cpg_unfence_on_target_4", "Verify unfencing failure with on_target = true using topology", True) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 2 -v true2") test.add_cmd_expected_fail("stonith_admin", "--output-as=xml -U node_fake -t 3", ExitStatus.NOSUCH) - test.add_stonith_log_pattern("(on) to be executed on target") + test.add_log_pattern("(on) to be executed on target") def build_remap_tests(self): """ Register tests that verify remapping of reboots to off-on """ test = self.new_test("cpg_remap_simple", "Verify sequential topology reboot is remapped to all-off-then-all-on", True) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ """-o "pcmk_off_timeout=1" -o "pcmk_reboot_timeout=10" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ """-o "pcmk_off_timeout=2" -o "pcmk_reboot_timeout=20" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") - test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") + test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) - test.add_stonith_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") - test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") + test.add_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") + test.add_log_pattern("perform 'off' action targeting node_fake using true1") + test.add_log_pattern("perform 'off' action targeting node_fake using true2") + test.add_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") # fence_dummy sets "on" as an on_target action - test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) targeting node_fake") - test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) targeting node_fake") - test.add_stonith_log_pattern("Undoing remap of reboot targeting 
node_fake") + test.add_log_pattern("Ignoring true1 'on' failure (no capable peers) targeting node_fake") + test.add_log_pattern("Ignoring true2 'on' failure (no capable peers) targeting node_fake") + test.add_log_pattern("Undoing remap of reboot targeting node_fake") test = self.new_test("cpg_remap_simple_off", "Verify sequential topology reboot skips 'on' if " "pcmk_reboot_action=off or agent doesn't support " "'on'", True) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o mode=pass " "-o pcmk_host_list=node_fake -o pcmk_off_timeout=1 " "-o pcmk_reboot_timeout=10 -o pcmk_reboot_action=off") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy_no_on " "-o mode=pass -o pcmk_host_list=node_fake " "-o pcmk_off_timeout=2 -o pcmk_reboot_timeout=20") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") - test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") + test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) - test.add_stonith_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") - test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") + test.add_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") + test.add_log_pattern("perform 'off' action targeting node_fake using true1") + test.add_log_pattern("perform 'off' action targeting node_fake using true2") + test.add_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") # "on" should be skipped - test.add_stonith_log_pattern("Not turning node_fake back on using " - "true1 because the device is configured " - "to stay off") - test.add_stonith_log_pattern("Not turning node_fake back on using true2" - " because the agent doesn't support 'on'") - test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") + test.add_log_pattern("Not turning node_fake back on using " + "true1 because the device is configured " + "to stay off") + test.add_log_pattern("Not turning node_fake back on using true2" + " because the agent doesn't support 'on'") + test.add_log_pattern("Undoing remap of reboot targeting node_fake") test = self.new_test("cpg_remap_automatic", "Verify remapped topology reboot skips automatic 'on'", True) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy_auto_unfence """ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy_auto_unfence """ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") - test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") - test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") - test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") - 
test.add_stonith_neg_log_pattern("perform 'on' action targeting node_fake using") - test.add_stonith_neg_log_pattern("'on' failure") + test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") + test.add_log_pattern("perform 'off' action targeting node_fake using true1") + test.add_log_pattern("perform 'off' action targeting node_fake using true2") + test.add_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") + test.add_log_pattern("Undoing remap of reboot targeting node_fake") + test.add_log_pattern("perform 'on' action targeting node_fake using", + negative=True) + test.add_log_pattern("'on' failure", + negative=True) test = self.new_test("cpg_remap_complex_1", "Verify remapped topology reboot in second level works if non-remapped first level fails", True) test.add_cmd("stonith_admin", """--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 2 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") - test.add_stonith_log_pattern("perform 'reboot' action targeting node_fake using false1") - test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") - test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") - test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) targeting node_fake") - test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) targeting node_fake") - test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") + test.add_log_pattern("perform 'reboot' action targeting node_fake using false1") + test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") + test.add_log_pattern("perform 'off' action targeting node_fake using true1") + test.add_log_pattern("perform 'off' action targeting node_fake using true2") + test.add_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") + test.add_log_pattern("Ignoring true1 'on' failure (no capable peers) targeting node_fake") + test.add_log_pattern("Ignoring true2 'on' failure (no capable peers) targeting node_fake") + test.add_log_pattern("Undoing remap of reboot targeting node_fake") test = self.new_test("cpg_remap_complex_2", "Verify remapped topology reboot failure in second level proceeds to third level", True) test.add_cmd("stonith_admin", """--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true3 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" 
""") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 2 -v true1 -v false2 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 3 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") - test.add_stonith_log_pattern("perform 'reboot' action targeting node_fake using false1") - test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") - test.add_stonith_log_pattern("perform 'off' action targeting node_fake using false2") - test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (off) the maximum number of times") - test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") - test.add_stonith_log_pattern("perform 'reboot' action targeting node_fake using true2") - test.add_stonith_neg_log_pattern("node_fake with true3") + test.add_log_pattern("perform 'reboot' action targeting node_fake using false1") + test.add_log_pattern("Remapping multiple-device reboot targeting node_fake") + test.add_log_pattern("perform 'off' action targeting node_fake using true1") + test.add_log_pattern("perform 'off' action targeting node_fake using false2") + test.add_log_pattern("Attempted to execute agent fence_dummy (off) the maximum number of times") + test.add_log_pattern("Undoing remap of reboot targeting node_fake") + test.add_log_pattern("perform 'reboot' action targeting node_fake using true2") + test.add_log_pattern("node_fake with true3", + negative=True) def build_query_tests(self): """ run stonith_admin --metadata for the fence_dummy agent and check command output """ test = self.new_test("get_metadata", "Run stonith_admin --metadata for the fence_dummy agent", True) test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -a fence_dummy --metadata", ' #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" static int last_cib_op_done = 0; static gboolean attribute_timer_cb(gpointer data) { attribute_t *a = data; crm_trace("Dampen interval expired for %s", a->id); attrd_write_or_elect_attribute(a); return FALSE; } static void attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { int level = LOG_ERR; GHashTableIter iter; const char *peer = NULL; attribute_value_t *v = NULL; char *name = user_data; attribute_t *a = g_hash_table_lookup(attributes, name); if(a == NULL) { crm_info("Attribute %s no longer exists", name); return; } a->update = 0; if (rc == pcmk_ok && call_id < 0) { rc = call_id; } switch (rc) { case pcmk_ok: level = LOG_INFO; last_cib_op_done = call_id; if (a->timer && !a->timeout_ms) { // Remove temporary dampening for failed writes mainloop_timer_del(a->timer); a->timer = NULL; } break; case -pcmk_err_diff_failed: /* When an attr changes while the CIB is syncing */ case -ETIME: /* When an attr changes while there is a DC election */ case -ENXIO: /* When an attr changes while the CIB is syncing a * newer config from a node that just came up */ level = LOG_WARNING; break; } do_crm_log(level, "CIB update %d result for %s: %s " CRM_XS " rc=%d", call_id, a->id, pcmk_strerror(rc), rc); g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) { do_crm_log(level, "* %s[%s]=%s", a->id, peer, v->requested); free(v->requested); v->requested = NULL; if (rc != pcmk_ok) { 
a->changed = true; /* Attempt write out again */ } } if (a->changed && attrd_election_won()) { if (rc == pcmk_ok) { /* We deferred a write of a new update because this update was in * progress. Write out the new value without additional delay. */ attrd_write_attribute(a, false); /* We're re-attempting a write because the original failed; delay * the next attempt so we don't potentially flood the CIB manager * and logs with a zillion attempts per second. * * @TODO We could elect a new writer instead. However, we'd have to * somehow downgrade our vote, and we'd still need something like this * if all peers similarly fail to write this attribute (which may * indicate a corrupted attribute entry rather than a CIB issue). */ } else if (a->timer) { // Attribute has a dampening value, so use that as delay if (!mainloop_timer_running(a->timer)) { crm_trace("Delayed re-attempted write for %s by %s", name, pcmk__readable_interval(a->timeout_ms)); mainloop_timer_start(a->timer); } } else { /* Set a temporary dampening of 2 seconds (timer will continue * to exist until the attribute's dampening gets set or the * write succeeds). */ a->timer = attrd_add_timer(a->id, 2000, a); mainloop_timer_start(a->timer); } } } static void build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value) { const char *set = NULL; xmlNode *xml_obj = NULL; xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE); crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS); crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); if (pcmk__str_eq(a->set_type, XML_TAG_ATTR_SETS, pcmk__str_null_matches)) { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS); } else if (pcmk__str_eq(a->set_type, XML_TAG_UTILIZATION, pcmk__str_none)) { xml_obj = create_xml_node(xml_obj, XML_TAG_UTILIZATION); } else { crm_err("Unknown set type attribute: %s", a->set_type); } if (a->set_id) { crm_xml_set_id(xml_obj, "%s", a->set_id); } else { crm_xml_set_id(xml_obj, "%s-%s", XML_CIB_TAG_STATUS, nodeid); } set = ID(xml_obj); xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); if (a->uuid) { crm_xml_set_id(xml_obj, "%s", a->uuid); } else { crm_xml_set_id(xml_obj, "%s-%s", set, a->id); } crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id); if(value) { crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value); } else { crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, ""); crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE); } } static void send_alert_attributes_value(attribute_t *a, GHashTable *t) { int rc = 0; attribute_value_t *at = NULL; GHashTableIter vIter; g_hash_table_iter_init(&vIter, t); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & at)) { rc = attrd_send_attribute_alert(at->nodename, at->nodeid, a->id, at->current); crm_trace("Sent alerts for %s[%s]=%s: nodeid=%d rc=%d", a->id, at->nodename, at->current, at->nodeid, rc); } } static void set_alert_attribute_value(GHashTable *t, attribute_value_t *v) { attribute_value_t *a_v = NULL; a_v = calloc(1, sizeof(attribute_value_t)); CRM_ASSERT(a_v != NULL); a_v->nodeid = v->nodeid; a_v->nodename = strdup(v->nodename); pcmk__str_update(&a_v->current, v->current); g_hash_table_replace(t, a_v->nodename, a_v); } mainloop_timer_t * attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr) { return mainloop_timer_add(id, timeout_ms, FALSE, attribute_timer_cb, attr); } void attrd_write_attribute(attribute_t *a, bool ignore_delay) { int private_updates = 0, cib_updates = 0; xmlNode *xml_top = NULL; attribute_value_t *v = NULL; 
GHashTableIter iter; enum cib_call_options flags = cib_quorum_override; GHashTable *alert_attribute_value = NULL; if (a == NULL) { return; } /* If this attribute will be written to the CIB ... */ - if (!a->is_private) { + if (!stand_alone && !a->is_private) { /* Defer the write if now's not a good time */ CRM_CHECK(the_cib != NULL, return); if (a->update && (a->update < last_cib_op_done)) { crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update); a->update = 0; // Don't log this message again } else if (a->update) { crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update); return; } else if (mainloop_timer_running(a->timer)) { if (ignore_delay) { /* 'refresh' forces a write of the current value of all attributes * Cancel any existing timers, we're writing it NOW */ mainloop_timer_stop(a->timer); crm_debug("Write out of '%s': timer is running but ignore delay", a->id); } else { crm_info("Write out of '%s' delayed: timer is running", a->id); return; } } /* Initialize the status update XML */ xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS); } /* Attribute will be written shortly, so clear changed flag */ a->changed = false; /* We will check all peers' uuids shortly, so initialize this to false */ a->unknown_peer_uuids = false; /* Attribute will be written shortly, so clear forced write flag */ a->force_write = FALSE; /* Make the table for the attribute trap */ alert_attribute_value = pcmk__strikey_table(NULL, attrd_free_attribute_value); /* Iterate over each peer value of this attribute */ g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) { crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_ANY); /* If the value's peer info does not correspond to a peer, ignore it */ if (peer == NULL) { crm_notice("Cannot update %s[%s]=%s because peer not known", a->id, v->nodename, v->current); continue; } /* If we're just learning the peer's node id, remember it */ if (peer->id && (v->nodeid == 0)) { crm_trace("Learned ID %u for node %s", peer->id, v->nodename); v->nodeid = peer->id; } /* If this is a private attribute, no update needs to be sent */ - if (a->is_private) { + if (stand_alone || a->is_private) { private_updates++; continue; } /* If the peer is found, but its uuid is unknown, defer write */ if (peer->uuid == NULL) { a->unknown_peer_uuids = true; crm_notice("Cannot update %s[%s]=%s because peer UUID not known " "(will retry if learned)", a->id, v->nodename, v->current); continue; } /* Add this value to status update XML */ crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID %u/%u)", a->id, v->nodename, v->current, peer->uname, peer->uuid, peer->id, v->nodeid); build_update_element(xml_top, a, peer->uuid, v->current); cib_updates++; /* Preservation of the attribute to transmit alert */ set_alert_attribute_value(alert_attribute_value, v); free(v->requested); v->requested = NULL; if (v->current) { v->requested = strdup(v->current); } else { /* Older attrd versions don't know about the cib_mixed_update * flag so make sure it goes to the local cib which does */ cib__set_call_options(flags, crm_system_name, cib_mixed_update|cib_scope_local); } } if (private_updates) { crm_info("Processed %d private change%s for %s, id=%s, set=%s", private_updates, pcmk__plural_s(private_updates), a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); } if (cib_updates) { crm_log_xml_trace(xml_top, __func__); a->update = cib_internal_op(the_cib, PCMK__CIB_REQUEST_MODIFY, 
NULL, XML_CIB_TAG_STATUS, xml_top, NULL, flags, a->user); crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)", a->update, cib_updates, pcmk__plural_s(cib_updates), a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a")); the_cib->cmds->register_callback_full(the_cib, a->update, CIB_OP_TIMEOUT_S, FALSE, strdup(a->id), "attrd_cib_callback", attrd_cib_callback, free); /* Transmit alert of the attribute */ send_alert_attributes_value(a, alert_attribute_value); } g_hash_table_destroy(alert_attribute_value); free_xml(xml_top); } void attrd_write_attributes(bool all, bool ignore_delay) { GHashTableIter iter; attribute_t *a = NULL; crm_debug("Writing out %s attributes", all? "all" : "changed"); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { if (!all && a->unknown_peer_uuids) { // Try writing this attribute again, in case peer ID was learned a->changed = true; } else if (a->force_write) { /* If the force_write flag is set, write the attribute. */ a->changed = true; } if(all || a->changed) { /* When forced write flag is set, ignore delay. */ attrd_write_attribute(a, (a->force_write ? true : ignore_delay)); } else { crm_trace("Skipping unchanged attribute %s", a->id); } } } void attrd_write_or_elect_attribute(attribute_t *a) { if (attrd_election_won()) { attrd_write_attribute(a, false); } else { attrd_start_election_if_needed(); } } diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c index 4695ca816b..ef205e6e13 100644 --- a/daemons/attrd/attrd_corosync.c +++ b/daemons/attrd/attrd_corosync.c @@ -1,619 +1,620 @@ /* * Copyright 2013-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" extern crm_exit_t attrd_exit_status; static xmlNode * attrd_confirmation(int callid) { xmlNode *node = create_xml_node(NULL, __func__); crm_xml_add(node, F_TYPE, T_ATTRD); crm_xml_add(node, F_ORIG, get_local_node_name()); crm_xml_add(node, PCMK__XA_TASK, PCMK__ATTRD_CMD_CONFIRM); crm_xml_add_int(node, XML_LRM_ATTR_CALLID, callid); return node; } static void attrd_peer_message(crm_node_t *peer, xmlNode *xml) { const char *election_op = crm_element_value(xml, F_CRM_TASK); if (election_op) { attrd_handle_election_op(peer, xml); return; } if (attrd_shutting_down()) { /* If we're shutting down, we want to continue responding to election * ops as long as we're a cluster member (because our vote may be * needed). Ignore all other messages. */ return; } else { pcmk__request_t request = { .ipc_client = NULL, .ipc_id = 0, .ipc_flags = 0, .peer = peer->uname, .xml = xml, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; request.op = crm_element_value_copy(request.xml, PCMK__XA_TASK); CRM_CHECK(request.op != NULL, return); attrd_handle_request(&request); /* Having finished handling the request, check to see if the originating * peer requested confirmation. If so, send that confirmation back now. */ if (pcmk__xe_attr_is_true(xml, PCMK__XA_CONFIRM) && !pcmk__str_eq(request.op, PCMK__ATTRD_CMD_CONFIRM, pcmk__str_none)) { int callid = 0; xmlNode *reply = NULL; /* Add the confirmation ID for the message we are confirming to the * response so the originating peer knows what they're a confirmation * for. 
*/ crm_element_value_int(xml, XML_LRM_ATTR_CALLID, &callid); reply = attrd_confirmation(callid); /* And then send the confirmation back to the originating peer. This * ends up right back in this same function (attrd_peer_message) on the * peer where it will have to do something with a PCMK__XA_CONFIRM type * message. */ crm_debug("Sending %s a confirmation", peer->uname); attrd_send_message(peer, reply, false); free_xml(reply); } pcmk__reset_request(&request); } } static void attrd_cpg_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { xml = string2xml(data); } if (xml == NULL) { crm_err("Bad message of class %d received from %s[%u]: '%.120s'", kind, from, nodeid, data); } else { crm_node_t *peer = crm_get_peer(nodeid, from); attrd_peer_message(peer, xml); } free_xml(xml); free(data); } static void attrd_cpg_destroy(gpointer unused) { if (attrd_shutting_down()) { crm_info("Corosync disconnection complete"); } else { crm_crit("Lost connection to cluster layer, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } } /*! * \internal * \brief Override an attribute sync with a local value * * Broadcast the local node's value for an attribute that's different from the * value provided in a peer's attribute synchronization response. This ensures a * node's values for itself take precedence and all peers are kept in sync. * * \param[in] a Attribute entry to override * * \return Local instance of attribute value */ static attribute_value_t * broadcast_local_value(const attribute_t *a) { attribute_value_t *v = g_hash_table_lookup(a->values, attrd_cluster->uname); xmlNode *sync = create_xml_node(NULL, __func__); crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); attrd_add_value_xml(sync, a, v, false); attrd_send_message(NULL, sync, false); free_xml(sync); return v; } /*! * \internal * \brief Ensure a Pacemaker Remote node is in the correct peer cache * * \param[in] node_name Name of Pacemaker Remote node to check */ static void cache_remote_node(const char *node_name) { /* If we previously assumed this node was an unseen cluster node, * remove its entry from the cluster peer cache. */ crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name); if (dup && (dup->uuid == NULL)) { reap_crm_member(0, node_name); } // Ensure node is in the remote peer cache CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); } #define state_text(state) pcmk__s((state), "in unknown state") /*! 
* \internal * \brief Return host's hash table entry (creating one if needed) * * \param[in,out] values Hash table of values * \param[in] host Name of peer to look up * \param[in] xml XML describing the attribute * * \return Pointer to new or existing hash table entry */ static attribute_value_t * attrd_lookup_or_create_value(GHashTable *values, const char *host, const xmlNode *xml) { attribute_value_t *v = g_hash_table_lookup(values, host); int is_remote = 0; crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); if (is_remote) { cache_remote_node(host); } if (v == NULL) { v = calloc(1, sizeof(attribute_value_t)); CRM_ASSERT(v != NULL); pcmk__str_update(&v->nodename, host); v->is_remote = is_remote; g_hash_table_replace(values, v->nodename, v); } return(v); } static void attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) { bool gone = false; bool is_remote = pcmk_is_set(peer->flags, crm_remote_node); switch (kind) { case crm_status_uname: crm_debug("%s node %s is now %s", (is_remote? "Remote" : "Cluster"), peer->uname, state_text(peer->state)); break; case crm_status_processes: if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { gone = true; } crm_debug("Node %s is %s a peer", peer->uname, (gone? "no longer" : "now")); break; case crm_status_nstate: crm_debug("%s node %s is now %s (was %s)", (is_remote? "Remote" : "Cluster"), peer->uname, state_text(peer->state), state_text(data)); if (pcmk__str_eq(peer->state, CRM_NODE_MEMBER, pcmk__str_casei)) { /* If we're the writer, send new peers a list of all attributes * (unless it's a remote node, which doesn't run its own attrd) */ if (attrd_election_won() && !pcmk_is_set(peer->flags, crm_remote_node)) { attrd_peer_sync(peer, NULL); } } else { // Remove all attribute values associated with lost nodes attrd_peer_remove(peer->uname, false, "loss"); gone = true; } break; } // Remove votes from cluster nodes that leave, in case election in progress if (gone && !is_remote) { attrd_remove_voter(peer); attrd_remove_peer_protocol_ver(peer->uname); attrd_do_not_expect_from_peer(peer->uname); // Ensure remote nodes that come up are in the remote node cache } else if (!gone && is_remote) { cache_remote_node(peer->uname); } } static void record_peer_nodeid(attribute_value_t *v, const char *host) { crm_node_t *known_peer = crm_get_peer(v->nodeid, host); crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid); if (attrd_election_won()) { attrd_write_attributes(false, false); } } static void update_attr_on_host(attribute_t *a, const crm_node_t *peer, const xmlNode *xml, const char *attr, const char *value, const char *host, bool filter, int is_force_write) { attribute_value_t *v = NULL; v = attrd_lookup_or_create_value(a->values, host, xml); if (filter && !pcmk__str_eq(v->current, value, pcmk__str_casei) && pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei)) { crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", attr, host, v->current, value, peer->uname); v = broadcast_local_value(a); } else if (!pcmk__str_eq(v->current, value, pcmk__str_casei)) { - crm_notice("Setting %s[%s]: %s -> %s " + crm_notice("Setting %s[%s]%s%s: %s -> %s " CRM_XS " from %s with %s write delay", - attr, host, pcmk__s(v->current, "(unset)"), + attr, host, a->set_type ? " in " : "", + pcmk__s(a->set_type, ""), pcmk__s(v->current, "(unset)"), pcmk__s(value, "(unset)"), peer->uname, (a->timeout_ms == 0)? 
"no" : pcmk__readable_interval(a->timeout_ms)); pcmk__str_update(&v->current, value); a->changed = true; if (pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei) && pcmk__str_eq(attr, XML_CIB_ATTR_SHUTDOWN, pcmk__str_none)) { if (!pcmk__str_eq(value, "0", pcmk__str_null_matches)) { attrd_set_requesting_shutdown(); } else { attrd_clear_requesting_shutdown(); } } // Write out new value or start dampening timer if (a->timeout_ms && a->timer) { crm_trace("Delayed write out (%dms) for %s", a->timeout_ms, attr); mainloop_timer_start(a->timer); } else { attrd_write_or_elect_attribute(a); } } else { if (is_force_write == 1 && a->timeout_ms && a->timer) { /* Save forced writing and set change flag. */ /* The actual attribute is written by Writer after election. */ crm_trace("Unchanged %s[%s] from %s is %s(Set the forced write flag)", attr, host, peer->uname, value); a->force_write = TRUE; } else { crm_trace("Unchanged %s[%s] from %s is %s", attr, host, peer->uname, value); } } /* Set the seen flag for attribute processing held only in the own node. */ v->seen = TRUE; /* If this is a cluster node whose node ID we are learning, remember it */ if ((v->nodeid == 0) && (v->is_remote == FALSE) && (crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, (int*)&v->nodeid) == 0) && (v->nodeid > 0)) { record_peer_nodeid(v, host); } } static void attrd_peer_update_one(const crm_node_t *peer, xmlNode *xml, bool filter) { attribute_t *a = NULL; const char *attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); const char *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); int is_force_write = 0; if (attr == NULL) { crm_warn("Could not update attribute: peer did not specify name"); return; } crm_element_value_int(xml, PCMK__XA_ATTR_FORCE, &is_force_write); a = attrd_populate_attribute(xml, attr); if (a == NULL) { return; } if (host == NULL) { // If no host was specified, update all hosts GHashTableIter vIter; crm_debug("Setting %s for all hosts to %s", attr, value); xml_remove_prop(xml, PCMK__XA_ATTR_NODE_ID); g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) { update_attr_on_host(a, peer, xml, attr, value, host, filter, is_force_write); } } else { // Update attribute value for the given host update_attr_on_host(a, peer, xml, attr, value, host, filter, is_force_write); } /* If this is a message from some attrd instance broadcasting its protocol * version, check to see if it's a new minimum version. 
*/ if (pcmk__str_eq(attr, CRM_ATTR_PROTOCOL, pcmk__str_none)) { attrd_update_minimum_protocol_ver(peer->uname, value); } } static void broadcast_unseen_local_values(void) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = NULL; g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { if (!(v->seen) && pcmk__str_eq(v->nodename, attrd_cluster->uname, pcmk__str_casei)) { if (sync == NULL) { sync = create_xml_node(NULL, __func__); crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); } attrd_add_value_xml(sync, a, v, a->timeout_ms && a->timer); } } } if (sync != NULL) { crm_debug("Broadcasting local-only values"); attrd_send_message(NULL, sync, false); free_xml(sync); } } int attrd_cluster_connect(void) { attrd_cluster = pcmk_cluster_new(); attrd_cluster->destroy = attrd_cpg_destroy; attrd_cluster->cpg.cpg_deliver_fn = attrd_cpg_dispatch; attrd_cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; crm_set_status_callback(&attrd_peer_change_cb); if (crm_cluster_connect(attrd_cluster) == FALSE) { crm_err("Cluster connection failed"); return -ENOTCONN; } return pcmk_ok; } void attrd_peer_clear_failure(pcmk__request_t *request) { xmlNode *xml = request->xml; const char *rsc = crm_element_value(xml, PCMK__XA_ATTR_RESOURCE); const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); const char *op = crm_element_value(xml, PCMK__XA_ATTR_OPERATION); const char *interval_spec = crm_element_value(xml, PCMK__XA_ATTR_INTERVAL); guint interval_ms = crm_parse_interval_spec(interval_spec); char *attr = NULL; GHashTableIter iter; regex_t regex; crm_node_t *peer = crm_get_peer(0, request->peer); if (attrd_failure_regex(®ex, rsc, op, interval_ms) != pcmk_ok) { crm_info("Ignoring invalid request to clear failures for %s", pcmk__s(rsc, "all resources")); return; } crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Make sure value is not set, so we delete */ if (crm_element_value(xml, PCMK__XA_ATTR_VALUE)) { crm_xml_replace(xml, PCMK__XA_ATTR_VALUE, NULL); } g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) { if (regexec(®ex, attr, 0, NULL, 0) == 0) { crm_trace("Matched %s when clearing %s", attr, pcmk__s(rsc, "all resources")); crm_xml_add(xml, PCMK__XA_ATTR_NAME, attr); attrd_peer_update(peer, xml, host, false); } } regfree(®ex); } /*! * \internal * \brief Load attributes from a peer sync response * * \param[in] peer Peer that sent clear request * \param[in] peer_won Whether peer is the attribute writer * \param[in,out] xml Request XML */ void attrd_peer_sync_response(const crm_node_t *peer, bool peer_won, xmlNode *xml) { crm_info("Processing " PCMK__ATTRD_CMD_SYNC_RESPONSE " from %s", peer->uname); if (peer_won) { /* Initialize the "seen" flag for all attributes to cleared, so we can * detect attributes that local node has but the writer doesn't. */ attrd_clear_value_seen(); } // Process each attribute update in the sync response for (xmlNode *child = pcmk__xml_first_child(xml); child != NULL; child = pcmk__xml_next(child)) { attrd_peer_update(peer, child, crm_element_value(child, PCMK__XA_ATTR_NODE_NAME), true); } if (peer_won) { /* If any attributes are still not marked as seen, the writer doesn't * know about them, so send all peers an update with them. */ broadcast_unseen_local_values(); } } /*! 
* \internal * \brief Remove all attributes and optionally peer cache entries for a node * * \param[in] host Name of node to purge * \param[in] uncache If true, remove node from peer caches * \param[in] source Who requested removal (only used for logging) */ void attrd_peer_remove(const char *host, bool uncache, const char *source) { attribute_t *a = NULL; GHashTableIter aIter; CRM_CHECK(host != NULL, return); crm_notice("Removing all %s attributes for peer %s", host, source); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { if(g_hash_table_remove(a->values, host)) { crm_debug("Removed %s[%s] for peer %s", a->id, host, source); } } if (uncache) { crm_remote_peer_cache_remove(host); reap_crm_member(0, host); } } void attrd_peer_sync(crm_node_t *peer, xmlNode *xml) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = create_xml_node(NULL, __func__); crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); attrd_add_value_xml(sync, a, v, false); } } crm_debug("Syncing values to %s", peer?peer->uname:"everyone"); attrd_send_message(peer, sync, false); free_xml(sync); } void attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, bool filter) { bool handle_sync_point = false; if (xml_has_children(xml)) { for (xmlNode *child = first_named_child(xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { attrd_copy_xml_attributes(xml, child); attrd_peer_update_one(peer, child, filter); if (attrd_request_has_sync_point(child)) { handle_sync_point = true; } } } else { attrd_peer_update_one(peer, xml, filter); if (attrd_request_has_sync_point(xml)) { handle_sync_point = true; } } /* If the update XML specified that the client wanted to wait for a sync * point, process that now. */ if (handle_sync_point) { crm_trace("Hit local sync point for attribute update"); attrd_ack_waitlist_clients(attrd_sync_point_local, xml); } } diff --git a/daemons/attrd/attrd_sync.c b/daemons/attrd/attrd_sync.c index 69367716ba..d59ddd5e5e 100644 --- a/daemons/attrd/attrd_sync.c +++ b/daemons/attrd/attrd_sync.c @@ -1,577 +1,577 @@ /* - * Copyright 2022 the Pacemaker project contributors + * Copyright 2022-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include "pacemaker-attrd.h" /* A hash table storing clients that are waiting on a sync point to be reached. * The key is waitlist_client - just a plain int. The obvious key would be * the IPC client's ID, but this is not guaranteed to be unique. A single client * could be waiting on a sync point for multiple attributes at the same time. * * It is not expected that this hash table will ever be especially large. */ static GHashTable *waitlist = NULL; static int waitlist_client = 0; struct waitlist_node { /* What kind of sync point does this node describe? */ enum attrd_sync_point sync_point; /* Information required to construct and send a reply to the client. 
*/ char *client_id; uint32_t ipc_id; uint32_t flags; }; /* A hash table storing information on in-progress IPC requests that are awaiting * confirmations. These requests are currently being processed by peer attrds and * we are waiting to receive confirmation messages from each peer indicating that * processing is complete. * * Multiple requests could be waiting on confirmations at the same time. * * The key is the unique callid for the IPC request, and the value is a * confirmation_action struct. */ static GHashTable *expected_confirmations = NULL; /*! * \internal * \brief A structure describing a single IPC request that is awaiting confirmations */ struct confirmation_action { /*! * \brief A list of peer attrds that we are waiting to receive confirmation * messages from * * This list is dynamic - as confirmations arrive from peer attrds, they will * be removed from this list. When the list is empty, all peers have processed * the request and the associated confirmation action will be taken. */ GList *respondents; /*! * \brief A timer that will be used to remove the client should it time out * before receiving all confirmations */ mainloop_timer_t *timer; /*! * \brief A function to run when all confirmations have been received */ attrd_confirmation_action_fn fn; /*! * \brief Information required to construct and send a reply to the client */ char *client_id; uint32_t ipc_id; uint32_t flags; /*! * \brief The XML request containing the callid associated with this action */ void *xml; }; static void next_key(void) { do { waitlist_client++; if (waitlist_client < 0) { waitlist_client = 1; } } while (g_hash_table_contains(waitlist, GINT_TO_POINTER(waitlist_client))); } static void free_waitlist_node(gpointer data) { struct waitlist_node *wl = (struct waitlist_node *) data; free(wl->client_id); free(wl); } static const char * sync_point_str(enum attrd_sync_point sync_point) { if (sync_point == attrd_sync_point_local) { return PCMK__VALUE_LOCAL; } else if (sync_point == attrd_sync_point_cluster) { return PCMK__VALUE_CLUSTER; } else { return "unknown"; } } /*! * \internal * \brief Add a client to the attrd waitlist * * Typically, a client receives an ACK for its XML IPC request immediately. However, * some clients want to wait until their request has been processed and taken effect. * This is called a sync point. Any client placed on this waitlist will have its * ACK message delayed until either its requested sync point is hit, or until it * times out. * * The XML IPC request must specify the type of sync point it wants to wait for. * * \param[in,out] request The request describing the client to place on the waitlist. 
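 *
 * (Editor's note: as the body below shows, the only recognized sync point
 * values are PCMK__VALUE_LOCAL and PCMK__VALUE_CLUSTER; a request with any
 * other value is ignored rather than added to the waitlist.)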
*/ void attrd_add_client_to_waitlist(pcmk__request_t *request) { const char *sync_point = attrd_request_sync_point(request->xml); struct waitlist_node *wl = NULL; if (sync_point == NULL) { return; } if (waitlist == NULL) { waitlist = pcmk__intkey_table(free_waitlist_node); } wl = calloc(sizeof(struct waitlist_node), 1); CRM_ASSERT(wl != NULL); wl->client_id = strdup(request->ipc_client->id); CRM_ASSERT(wl->client_id); if (pcmk__str_eq(sync_point, PCMK__VALUE_LOCAL, pcmk__str_none)) { wl->sync_point = attrd_sync_point_local; } else if (pcmk__str_eq(sync_point, PCMK__VALUE_CLUSTER, pcmk__str_none)) { wl->sync_point = attrd_sync_point_cluster; } else { free_waitlist_node(wl); return; } wl->ipc_id = request->ipc_id; wl->flags = request->flags; next_key(); pcmk__intkey_table_insert(waitlist, waitlist_client, wl); crm_trace("Added client %s to waitlist for %s sync point", wl->client_id, sync_point_str(wl->sync_point)); crm_trace("%d clients now on waitlist", g_hash_table_size(waitlist)); /* And then add the key to the request XML so we can uniquely identify * it when it comes time to issue the ACK. */ crm_xml_add_int(request->xml, XML_LRM_ATTR_CALLID, waitlist_client); } /*! * \internal * \brief Free all memory associated with the waitlist. This is most typically * used when attrd shuts down. */ void attrd_free_waitlist(void) { if (waitlist == NULL) { return; } g_hash_table_destroy(waitlist); waitlist = NULL; } /*! * \internal * \brief Unconditionally remove a client from the waitlist, such as when the client * node disconnects from the cluster * * \param[in] client The client to remove */ void attrd_remove_client_from_waitlist(pcmk__client_t *client) { GHashTableIter iter; gpointer value; if (waitlist == NULL) { return; } g_hash_table_iter_init(&iter, waitlist); while (g_hash_table_iter_next(&iter, NULL, &value)) { struct waitlist_node *wl = (struct waitlist_node *) value; if (pcmk__str_eq(wl->client_id, client->id, pcmk__str_none)) { g_hash_table_iter_remove(&iter); crm_trace("%d clients now on waitlist", g_hash_table_size(waitlist)); } } } /*! * \internal * \brief Send an IPC ACK message to all awaiting clients * * This function will search the waitlist for all clients that are currently awaiting * an ACK indicating their attrd operation is complete. Only those clients with a * matching sync point type and callid from their original XML IPC request will be * ACKed. Once they have received an ACK, they will be removed from the waitlist. * * \param[in] sync_point What kind of sync point have we hit? * \param[in] xml The original XML IPC request. */ void attrd_ack_waitlist_clients(enum attrd_sync_point sync_point, const xmlNode *xml) { int callid; gpointer value; if (waitlist == NULL) { return; } if (crm_element_value_int(xml, XML_LRM_ATTR_CALLID, &callid) == -1) { crm_warn("Could not get callid from request XML"); return; } value = pcmk__intkey_table_lookup(waitlist, callid); if (value != NULL) { struct waitlist_node *wl = (struct waitlist_node *) value; pcmk__client_t *client = NULL; if (wl->sync_point != sync_point) { return; } - crm_trace("Alerting client %s for reached %s sync point", - wl->client_id, sync_point_str(wl->sync_point)); + crm_notice("Alerting client %s for reached %s sync point", + wl->client_id, sync_point_str(wl->sync_point)); client = pcmk__find_client_by_id(wl->client_id); if (client == NULL) { return; } attrd_send_ack(client, wl->ipc_id, wl->flags | crm_ipc_client_response); /* And then remove the client so it doesn't get alerted again. 
*/ pcmk__intkey_table_remove(waitlist, callid); crm_trace("%d clients now on waitlist", g_hash_table_size(waitlist)); } } /*! * \internal * \brief Action to take when a cluster sync point is hit for a * PCMK__ATTRD_CMD_UPDATE* message. * * \param[in] xml The request that should be passed along to * attrd_ack_waitlist_clients. This should be the original * IPC request containing the callid for this update message. */ int attrd_cluster_sync_point_update(xmlNode *xml) { crm_trace("Hit cluster sync point for attribute update"); attrd_ack_waitlist_clients(attrd_sync_point_cluster, xml); return pcmk_rc_ok; } /*! * \internal * \brief Return the sync point attribute for an IPC request * * This function will check both the top-level element of \p xml for a sync * point attribute, as well as all of its \p op children, if any. The latter * is useful for newer versions of attrd that can put multiple IPC requests * into a single message. * * \param[in] xml An XML IPC request * * \note It is assumed that if one child element has a sync point attribute, * all will have a sync point attribute and they will all be the same * sync point. No other configuration is supported. * * \return The sync point attribute of \p xml, or NULL if none. */ const char * attrd_request_sync_point(xmlNode *xml) { if (xml_has_children(xml)) { xmlNode *child = pcmk__xe_match(xml, XML_ATTR_OP, PCMK__XA_ATTR_SYNC_POINT, NULL); if (child) { return crm_element_value(child, PCMK__XA_ATTR_SYNC_POINT); } else { return NULL; } } else { return crm_element_value(xml, PCMK__XA_ATTR_SYNC_POINT); } } /*! * \internal * \brief Does an IPC request contain any sync point attribute? * * \param[in] xml An XML IPC request * * \return true if there's a sync point attribute, false otherwise */ bool attrd_request_has_sync_point(xmlNode *xml) { return attrd_request_sync_point(xml) != NULL; } static void free_action(gpointer data) { struct confirmation_action *action = (struct confirmation_action *) data; g_list_free_full(action->respondents, free); mainloop_timer_del(action->timer); free_xml(action->xml); free(action->client_id); free(action); } /* Remove an IPC request from the expected_confirmations table if the peer attrds * don't respond before the timeout is hit. We set the timeout to 15s. The exact * number isn't critical - we just want to make sure that the table eventually gets * cleared of things that didn't complete. */ static gboolean confirmation_timeout_cb(gpointer data) { struct confirmation_action *action = (struct confirmation_action *) data; GHashTableIter iter; gpointer value; if (expected_confirmations == NULL) { return G_SOURCE_REMOVE; } g_hash_table_iter_init(&iter, expected_confirmations); while (g_hash_table_iter_next(&iter, NULL, &value)) { if (value == action) { pcmk__client_t *client = pcmk__find_client_by_id(action->client_id); if (client == NULL) { return G_SOURCE_REMOVE; } crm_trace("Timed out waiting for confirmations for client %s", client->id); pcmk__ipc_send_ack(client, action->ipc_id, action->flags | crm_ipc_client_response, "ack", ATTRD_PROTOCOL_VERSION, CRM_EX_TIMEOUT); g_hash_table_iter_remove(&iter); crm_trace("%d requests now in expected confirmations table", g_hash_table_size(expected_confirmations)); break; } } return G_SOURCE_REMOVE; } /*! * \internal * \brief When a peer disconnects from the cluster, no longer wait for its confirmation * for any IPC action. If this peer is the last one being waited on, this will * trigger the confirmation action. 
* * \param[in] host The disconnecting peer attrd's uname */ void attrd_do_not_expect_from_peer(const char *host) { GList *keys = NULL; if (expected_confirmations == NULL) { return; } keys = g_hash_table_get_keys(expected_confirmations); crm_trace("Removing peer %s from expected confirmations", host); for (GList *node = keys; node != NULL; node = node->next) { int callid = *(int *) node->data; attrd_handle_confirmation(callid, host); } g_list_free(keys); } /*! * \internal * \brief When a client disconnects from the cluster, no longer wait on confirmations * for it. Because the peer attrds may still be processing the original IPC * message, they may still send us confirmations. However, we will take no * action on them. * * \param[in] client The disconnecting client */ void attrd_do_not_wait_for_client(pcmk__client_t *client) { GHashTableIter iter; gpointer value; if (expected_confirmations == NULL) { return; } g_hash_table_iter_init(&iter, expected_confirmations); while (g_hash_table_iter_next(&iter, NULL, &value)) { struct confirmation_action *action = (struct confirmation_action *) value; if (pcmk__str_eq(action->client_id, client->id, pcmk__str_none)) { crm_trace("Removing client %s from expected confirmations", client->id); g_hash_table_iter_remove(&iter); crm_trace("%d requests now in expected confirmations table", g_hash_table_size(expected_confirmations)); break; } } } /*! * \internal * \brief Register some action to be taken when IPC request confirmations are * received * * When this function is called, a list of all peer attrds that support confirming * requests is generated. As confirmations from these peer attrds are received, * they are removed from this list. When the list is empty, the registered action * will be called. * * \note This function should always be called before attrd_send_message is called * to broadcast to the peers to ensure that we know what replies we are * waiting on. Otherwise, it is possible the peer could finish and confirm * before we know to expect it. 
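 *
 *       Editor's sketch of that ordering (the callback is the real
 *       attrd_cluster_sync_point_update() from this file; the surrounding
 *       caller is hypothetical, and it is assumed that the confirm flag of
 *       attrd_send_message() is what requests peer confirmations):
 *
 *           attrd_expect_confirmations(request, attrd_cluster_sync_point_update);
 *           attrd_send_message(NULL, request->xml, true);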
* * \param[in] request The request that is awaiting confirmations * \param[in] fn A function to be run after all confirmations are received */ void attrd_expect_confirmations(pcmk__request_t *request, attrd_confirmation_action_fn fn) { struct confirmation_action *action = NULL; GHashTableIter iter; gpointer host, ver; GList *respondents = NULL; int callid; if (expected_confirmations == NULL) { expected_confirmations = pcmk__intkey_table((GDestroyNotify) free_action); } if (crm_element_value_int(request->xml, XML_LRM_ATTR_CALLID, &callid) == -1) { crm_err("Could not get callid from xml"); return; } if (pcmk__intkey_table_lookup(expected_confirmations, callid)) { crm_err("Already waiting on confirmations for call id %d", callid); return; } g_hash_table_iter_init(&iter, peer_protocol_vers); while (g_hash_table_iter_next(&iter, &host, &ver)) { if (ATTRD_SUPPORTS_CONFIRMATION(GPOINTER_TO_INT(ver))) { char *s = strdup((char *) host); CRM_ASSERT(s != NULL); respondents = g_list_prepend(respondents, s); } } action = calloc(1, sizeof(struct confirmation_action)); CRM_ASSERT(action != NULL); action->respondents = respondents; action->fn = fn; action->xml = copy_xml(request->xml); action->client_id = strdup(request->ipc_client->id); CRM_ASSERT(action->client_id != NULL); action->ipc_id = request->ipc_id; action->flags = request->flags; action->timer = mainloop_timer_add(NULL, 15000, FALSE, confirmation_timeout_cb, action); mainloop_timer_start(action->timer); pcmk__intkey_table_insert(expected_confirmations, callid, action); crm_trace("Callid %d now waiting on %d confirmations", callid, g_list_length(respondents)); crm_trace("%d requests now in expected confirmations table", g_hash_table_size(expected_confirmations)); } void attrd_free_confirmations(void) { if (expected_confirmations != NULL) { g_hash_table_destroy(expected_confirmations); expected_confirmations = NULL; } } /*! * \internal * \brief Process a confirmation message from a peer attrd * * This function is called every time a PCMK__ATTRD_CMD_CONFIRM message is * received from a peer attrd. If this is the last confirmation we are waiting * on for a given operation, the registered action will be called. * * \param[in] callid The unique callid for the XML IPC request * \param[in] host The confirming peer attrd's uname */ void attrd_handle_confirmation(int callid, const char *host) { struct confirmation_action *action = NULL; GList *node = NULL; if (expected_confirmations == NULL) { return; } action = pcmk__intkey_table_lookup(expected_confirmations, callid); if (action == NULL) { return; } node = g_list_find_custom(action->respondents, host, (GCompareFunc) strcasecmp); if (node == NULL) { return; } action->respondents = g_list_remove(action->respondents, node->data); crm_trace("Callid %d now waiting on %d confirmations", callid, g_list_length(action->respondents)); if (action->respondents == NULL) { action->fn(action->xml); pcmk__intkey_table_remove(expected_confirmations, callid); crm_trace("%d requests now in expected confirmations table", g_hash_table_size(expected_confirmations)); } } diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c index da52f54e5b..9fd861b946 100644 --- a/daemons/attrd/pacemaker-attrd.c +++ b/daemons/attrd/pacemaker-attrd.c @@ -1,320 +1,359 @@ /* - * Copyright 2013-2022 the Pacemaker project contributors + * Copyright 2013-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" #define SUMMARY "daemon for managing Pacemaker node attributes" +gboolean stand_alone = FALSE; +gchar **log_files = NULL; + +static GOptionEntry entries[] = { + { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone, + "(Advanced use only) Run in stand-alone mode", NULL }, + + { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY, + &log_files, "Send logs to the additional named logfile", NULL }, + + { NULL } +}; + static pcmk__output_t *out = NULL; static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; lrmd_t *the_lrmd = NULL; crm_cluster_t *attrd_cluster = NULL; crm_trigger_t *attrd_config_read = NULL; crm_exit_t attrd_exit_status = CRM_EX_OK; static void attrd_cib_destroy_cb(gpointer user_data) { cib_t *conn = user_data; conn->cmds->signoff(conn); /* Ensure IPC is cleaned up */ if (attrd_shutting_down()) { crm_info("Connection disconnection complete"); } else { /* eventually this should trigger a reconnect, not a shutdown */ crm_crit("Lost connection to the CIB manager, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } return; } static void attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { do_crm_log_unlikely((rc? LOG_NOTICE : LOG_DEBUG), "Cleared transient attributes: %s " CRM_XS " xpath=%s rc=%d", pcmk_strerror(rc), (char *) user_data, rc); } #define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS /*! * \internal * \brief Wipe all transient attributes for this node from the CIB * * Clear any previous transient node attributes from the CIB. This is * normally done by the DC's controller when this node leaves the cluster, but * this handles the case where the node restarted so quickly that the * cluster layer didn't notice. * * \todo If pacemaker-attrd respawns after crashing (see PCMK_respawned), * ideally we'd skip this and sync our attributes from the writer. * However, currently we reject any values for us that the writer has, in * attrd_peer_update(). 
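 *
 * (Editor's note: for a local node named "node1", the XPath built from
 * XPATH_TRANSIENT above expands to
 * "//node_state[@uname='node1']/transient_attributes", assuming the usual
 * value of XML_TAG_TRANSIENT_NODEATTRS.)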
*/ static void attrd_erase_attrs(void) { int call_id; char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname); crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s", xpath); call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_quorum_override | cib_xpath); the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath, "attrd_erase_cb", attrd_erase_cb, free); } static int attrd_cib_connect(int max_retry) { static int attempts = 0; int rc = -ENOTCONN; the_cib = cib_new(); if (the_cib == NULL) { return -ENOTCONN; } do { if(attempts > 0) { sleep(attempts); } attempts++; crm_debug("Connection attempt %d to the CIB manager", attempts); rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command); } while(rc != pcmk_ok && attempts < max_retry); if (rc != pcmk_ok) { crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); goto cleanup; } crm_debug("Connected to the CIB manager after %d attempts", attempts); rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb); if (rc != pcmk_ok) { crm_err("Could not set disconnection callback"); goto cleanup; } rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb); if(rc != pcmk_ok) { crm_err("Could not set CIB notification callback"); goto cleanup; } rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb); if (rc != pcmk_ok) { crm_err("Could not set CIB notification callback (update)"); goto cleanup; } return pcmk_ok; cleanup: cib__clean_up_connection(&the_cib); return -ENOTCONN; } /*! * \internal * \brief Prepare the CIB after cluster is connected */ static void attrd_cib_init(void) { // We have no attribute values in memory, wipe the CIB to match attrd_erase_attrs(); // Set a trigger for reading the CIB (for the alerts section) attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL); // Always read the CIB at start-up mainloop_set_trigger(attrd_config_read); } static bool ipc_already_running(void) { pcmk_ipc_api_t *old_instance = NULL; int rc = pcmk_rc_ok; rc = pcmk_new_ipc_api(&old_instance, pcmk_ipc_attrd); if (rc != pcmk_rc_ok) { return false; } rc = pcmk_connect_ipc(old_instance, pcmk_ipc_dispatch_sync); if (rc != pcmk_rc_ok) { pcmk_free_ipc_api(old_instance); return false; } pcmk_disconnect_ipc(old_instance); pcmk_free_ipc_api(old_instance); return true; } static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { - return pcmk__build_arg_context(args, "text (default), xml", group, NULL); + GOptionContext *context = NULL; + + context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); + pcmk__add_main_args(context, entries); + return context; } int main(int argc, char **argv) { int rc = pcmk_rc_ok; GError *error = NULL; bool initialized = false; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, NULL); GOptionContext *context = build_arg_context(args, &output_group); attrd_init_mainloop(); crm_log_preinit(NULL, argc, argv); mainloop_add_signal(SIGTERM, attrd_shutdown); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { attrd_exit_status = CRM_EX_USAGE; goto done; } rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if ((rc != pcmk_rc_ok) || (out == NULL)) { attrd_exit_status = CRM_EX_ERROR; g_set_error(&error, 
PCMK__EXITC_ERROR, attrd_exit_status, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } if (args->version) { out->version(out, false); goto done; } - initialized = true; + // Open additional log files + pcmk__add_logfiles(log_files, out); crm_log_init(T_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); - crm_notice("Starting Pacemaker node attribute manager"); + crm_notice("Starting Pacemaker node attribute manager%s", + stand_alone ? " in standalone mode" : ""); if (ipc_already_running()) { - crm_err("pacemaker-attrd is already active, aborting startup"); - crm_exit(CRM_EX_OK); + const char *msg = "pacemaker-attrd is already active, aborting startup"; + + attrd_exit_status = CRM_EX_OK; + g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "%s", msg); + crm_err("%s", msg); + goto done; } + initialized = true; + attributes = pcmk__strkey_table(NULL, attrd_free_attribute); /* Connect to the CIB before connecting to the cluster or listening for IPC. * This allows us to assume the CIB is connected whenever we process a * cluster or IPC message (which also avoids start-up race conditions). */ - if (attrd_cib_connect(30) != pcmk_ok) { - attrd_exit_status = CRM_EX_FATAL; - goto done; + if (!stand_alone) { + if (attrd_cib_connect(30) != pcmk_ok) { + attrd_exit_status = CRM_EX_FATAL; + g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, + "Could not connect to the CIB"); + goto done; + } + crm_info("CIB connection active"); } - crm_info("CIB connection active"); if (attrd_cluster_connect() != pcmk_ok) { attrd_exit_status = CRM_EX_FATAL; + g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, + "Could not connect to the cluster"); goto done; } crm_info("Cluster connection active"); // Initialization that requires the cluster to be connected attrd_election_init(); - attrd_cib_init(); + + if (!stand_alone) { + attrd_cib_init(); + } /* Set a private attribute for ourselves with the protocol version we * support. This lets all nodes determine the minimum supported version * across all nodes. It also ensures that the writer learns our node name, * so it can send our attributes to the CIB. */ attrd_broadcast_protocol(); attrd_init_ipc(); crm_notice("Pacemaker node attribute manager successfully started and accepting connections"); attrd_run_mainloop(); done: if (initialized) { crm_info("Shutting down attribute manager"); attrd_election_fini(); attrd_ipc_fini(); attrd_lrmd_disconnect(); - attrd_cib_disconnect(); - attrd_free_waitlist(); + if (!stand_alone) { + attrd_cib_disconnect(); + } + + attrd_free_waitlist(); pcmk_cluster_free(attrd_cluster); g_hash_table_destroy(attributes); } g_strfreev(processed_args); pcmk__free_arg_context(context); + g_strfreev(log_files); + pcmk__output_and_clear_error(error, out); if (out != NULL) { out->finish(out, attrd_exit_status, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); crm_exit(attrd_exit_status); } diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h index 2803cbf9f2..329fb5ad8c 100644 --- a/daemons/attrd/pacemaker-attrd.h +++ b/daemons/attrd/pacemaker-attrd.h @@ -1,214 +1,216 @@ /* * Copyright 2013-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY.
*/ #ifndef PACEMAKER_ATTRD__H # define PACEMAKER_ATTRD__H #include #include #include #include #include #include #include /* * Legacy attrd (all pre-1.1.11 Pacemaker versions, plus all versions when used * with the no-longer-supported CMAN or corosync-plugin stacks) is unversioned. * * With atomic attrd, each attrd will send ATTRD_PROTOCOL_VERSION with every * peer request and reply. As of Pacemaker 2.0.0, at start-up each attrd will * also set a private attribute for itself with its version, so any attrd can * determine the minimum version supported by all peers. * * Protocol Pacemaker Significant changes * -------- --------- ------------------- * 1 1.1.11 PCMK__ATTRD_CMD_UPDATE (PCMK__XA_ATTR_NAME only), * PCMK__ATTRD_CMD_PEER_REMOVE, PCMK__ATTRD_CMD_REFRESH, * PCMK__ATTRD_CMD_FLUSH, PCMK__ATTRD_CMD_SYNC, * PCMK__ATTRD_CMD_SYNC_RESPONSE * 1 1.1.13 PCMK__ATTRD_CMD_UPDATE (with PCMK__XA_ATTR_PATTERN), * PCMK__ATTRD_CMD_QUERY * 1 1.1.15 PCMK__ATTRD_CMD_UPDATE_BOTH, * PCMK__ATTRD_CMD_UPDATE_DELAY * 2 1.1.17 PCMK__ATTRD_CMD_CLEAR_FAILURE * 3 2.1.1 PCMK__ATTRD_CMD_SYNC_RESPONSE indicates remote nodes * 4 2.1.5 Multiple attributes can be updated in a single IPC * message * 5 2.1.5 Peers can request confirmation of a sent message */ #define ATTRD_PROTOCOL_VERSION "5" #define ATTRD_SUPPORTS_MULTI_MESSAGE(x) ((x) >= 4) #define ATTRD_SUPPORTS_CONFIRMATION(x) ((x) >= 5) #define attrd_send_ack(client, id, flags) \ pcmk__ipc_send_ack((client), (id), (flags), "ack", ATTRD_PROTOCOL_VERSION, CRM_EX_INDETERMINATE) void attrd_init_mainloop(void); void attrd_run_mainloop(void); void attrd_set_requesting_shutdown(void); void attrd_clear_requesting_shutdown(void); void attrd_free_waitlist(void); bool attrd_requesting_shutdown(void); bool attrd_shutting_down(void); void attrd_shutdown(int nsig); void attrd_init_ipc(void); void attrd_ipc_fini(void); void attrd_cib_disconnect(void); bool attrd_value_needs_expansion(const char *value); int attrd_expand_value(const char *value, const char *old_value); /* regular expression to clear failures of all resources */ #define ATTRD_RE_CLEAR_ALL \ "^(" PCMK__FAIL_COUNT_PREFIX "|" PCMK__LAST_FAILURE_PREFIX ")-" /* regular expression to clear failure of all operations for one resource * (format takes resource name) * * @COMPAT attributes set < 1.1.17: * also match older attributes that do not have the operation part */ #define ATTRD_RE_CLEAR_ONE ATTRD_RE_CLEAR_ALL "%s(#.+_[0-9]+)?$" /* regular expression to clear failure of one operation for one resource * (format takes resource name, operation name, and interval) * * @COMPAT attributes set < 1.1.17: * also match older attributes that do not have the operation part */ #define ATTRD_RE_CLEAR_OP ATTRD_RE_CLEAR_ALL "%s(#%s_%u)?$" int attrd_failure_regex(regex_t *regex, const char *rsc, const char *op, guint interval_ms); extern cib_t *the_cib; /* Alerts */ extern lrmd_t *the_lrmd; extern crm_trigger_t *attrd_config_read; void attrd_lrmd_disconnect(void); gboolean attrd_read_options(gpointer user_data); void attrd_cib_replaced_cb(const char *event, xmlNode * msg); void attrd_cib_updated_cb(const char *event, xmlNode *msg); int attrd_send_attribute_alert(const char *node, int nodeid, const char *attr, const char *value); // Elections void attrd_election_init(void); void attrd_election_fini(void); void attrd_start_election_if_needed(void); bool attrd_election_won(void); void attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml); bool attrd_check_for_new_writer(const crm_node_t *peer, const xmlNode *xml); void 
attrd_declare_winner(void); void attrd_remove_voter(const crm_node_t *peer); void attrd_xml_add_writer(xmlNode *xml); typedef struct attribute_s { char *uuid; /* TODO: Remove if at all possible */ char *id; char *set_id; char *set_type; GHashTable *values; int update; int timeout_ms; /* TODO: refactor these three as a bitmask */ bool changed; /* whether attribute value has changed since last write */ bool unknown_peer_uuids; /* whether we know we're missing a peer uuid */ gboolean is_private; /* whether to keep this attribute out of the CIB */ mainloop_timer_t *timer; char *user; gboolean force_write; /* Flag for updating attribute by ignoring delay */ } attribute_t; typedef struct attribute_value_s { uint32_t nodeid; gboolean is_remote; char *nodename; char *current; char *requested; gboolean seen; } attribute_value_t; extern crm_cluster_t *attrd_cluster; extern GHashTable *attributes; extern GHashTable *peer_protocol_vers; #define CIB_OP_TIMEOUT_S 120 int attrd_cluster_connect(void); void attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, bool filter); void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); void attrd_peer_remove(const char *host, bool uncache, const char *source); void attrd_peer_clear_failure(pcmk__request_t *request); void attrd_peer_sync_response(const crm_node_t *peer, bool peer_won, xmlNode *xml); void attrd_broadcast_protocol(void); xmlNode *attrd_client_peer_remove(pcmk__request_t *request); xmlNode *attrd_client_clear_failure(pcmk__request_t *request); xmlNode *attrd_client_update(pcmk__request_t *request); xmlNode *attrd_client_refresh(pcmk__request_t *request); xmlNode *attrd_client_query(pcmk__request_t *request); gboolean attrd_send_message(crm_node_t *node, xmlNode *data, bool confirm); xmlNode *attrd_add_value_xml(xmlNode *parent, const attribute_t *a, const attribute_value_t *v, bool force_write); void attrd_clear_value_seen(void); void attrd_free_attribute(gpointer data); void attrd_free_attribute_value(gpointer data); attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr); void attrd_write_attribute(attribute_t *a, bool ignore_delay); void attrd_write_attributes(bool all, bool ignore_delay); void attrd_write_or_elect_attribute(attribute_t *a); extern int minimum_protocol_version; void attrd_remove_peer_protocol_ver(const char *host); void attrd_update_minimum_protocol_ver(const char *host, const char *value); mainloop_timer_t *attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr); void attrd_unregister_handlers(void); void attrd_handle_request(pcmk__request_t *request); enum attrd_sync_point { attrd_sync_point_local, attrd_sync_point_cluster, }; typedef int (*attrd_confirmation_action_fn)(xmlNode *); void attrd_add_client_to_waitlist(pcmk__request_t *request); void attrd_ack_waitlist_clients(enum attrd_sync_point sync_point, const xmlNode *xml); int attrd_cluster_sync_point_update(xmlNode *xml); void attrd_do_not_expect_from_peer(const char *host); void attrd_do_not_wait_for_client(pcmk__client_t *client); void attrd_expect_confirmations(pcmk__request_t *request, attrd_confirmation_action_fn fn); void attrd_free_confirmations(void); void attrd_handle_confirmation(int callid, const char *host); void attrd_remove_client_from_waitlist(pcmk__client_t *client); const char *attrd_request_sync_point(xmlNode *xml); bool attrd_request_has_sync_point(xmlNode *xml); void attrd_copy_xml_attributes(xmlNode *src, xmlNode *dest); +extern gboolean stand_alone; + #endif /* PACEMAKER_ATTRD__H */ diff --git 
a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c index 1109f98784..6b14dbb491 100644 --- a/daemons/fenced/pacemaker-fenced.c +++ b/daemons/fenced/pacemaker-fenced.c @@ -1,1752 +1,1743 @@ /* * Copyright 2009-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include // PRIu32, PRIx32 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SUMMARY "daemon for executing fencing devices in a Pacemaker cluster" char *stonith_our_uname = NULL; long stonith_watchdog_timeout_ms = 0; GList *stonith_watchdog_targets = NULL; static GMainLoop *mainloop = NULL; gboolean stand_alone = FALSE; static gboolean stonith_shutdown_flag = FALSE; static qb_ipcs_service_t *ipcs = NULL; static xmlNode *local_cib = NULL; static pe_working_set_t *fenced_data_set = NULL; static const unsigned long long data_set_flags = pe_flag_quick_location | pe_flag_no_compat | pe_flag_no_counts; static cib_t *cib_api = NULL; static pcmk__output_t *logger_out = NULL; static pcmk__output_t *out = NULL; pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; static struct { bool no_cib_connect; gchar **log_files; } options; static crm_exit_t exit_code = CRM_EX_OK; static void stonith_shutdown(int nsig); static void stonith_cleanup(void); static int32_t st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { if (stonith_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", pcmk__client_pid(c)); return -EPERM; } if (pcmk__new_client(c, uid, gid) == NULL) { return -EIO; } return 0; } /* Exit code means? 
*/ static int32_t st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; int call_options = 0; xmlNode *request = NULL; pcmk__client_t *c = pcmk__find_client(qbc); const char *op = NULL; if (c == NULL) { crm_info("Invalid client: %p", qbc); return 0; } request = pcmk__client_data2xml(c, data, &id, &flags); if (request == NULL) { pcmk__ipc_send_ack(c, id, flags, "nack", NULL, CRM_EX_PROTOCOL); return 0; } op = crm_element_value(request, F_CRM_TASK); if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) { crm_xml_add(request, F_TYPE, T_STONITH_NG); crm_xml_add(request, F_STONITH_OPERATION, op); crm_xml_add(request, F_STONITH_CLIENTID, c->id); crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c)); crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname); send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE); free_xml(request); return 0; } if (c->name == NULL) { const char *value = crm_element_value(request, F_STONITH_CLIENTNAME); if (value == NULL) { value = "unknown"; } c->name = crm_strdup_printf("%s.%u", value, c->pid); } crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); crm_trace("Flags %#08" PRIx32 "/%#08x for command %" PRIu32 " from client %s", flags, call_options, id, pcmk__client_name(c)); if (pcmk_is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(flags & crm_ipc_client_response); CRM_LOG_ASSERT(c->request_id == 0); /* This means the client has two synchronous events in-flight */ c->request_id = id; /* Reply only to the last one */ } crm_xml_add(request, F_STONITH_CLIENTID, c->id); crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c)); crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname); crm_log_xml_trace(request, "ipc-received"); stonith_command(c, id, flags, request, NULL); free_xml(request); return 0; } /* Error code means? 
*/ static int32_t st_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p closed", c); pcmk__free_client(client); /* 0 means: yes, go ahead and destroy the connection */ return 0; } static void st_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p destroyed", c); st_ipc_closed(c); } static void stonith_peer_callback(xmlNode * msg, void *private_data) { const char *remote_peer = crm_element_value(msg, F_ORIG); const char *op = crm_element_value(msg, F_STONITH_OPERATION); if (pcmk__str_eq(op, "poke", pcmk__str_none)) { return; } crm_log_xml_trace(msg, "Peer[inbound]"); stonith_command(NULL, 0, 0, msg, remote_peer); } #if SUPPORT_COROSYNC static void stonith_peer_ais_callback(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { xml = string2xml(data); if (xml == NULL) { crm_err("Invalid XML: '%.120s'", data); free(data); return; } crm_xml_add(xml, F_ORIG, from); /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */ stonith_peer_callback(xml, NULL); } free_xml(xml); free(data); return; } static void stonith_peer_cs_destroy(gpointer user_data) { crm_crit("Lost connection to cluster layer, shutting down"); stonith_shutdown(0); } #endif void do_local_reply(xmlNode *notify_src, pcmk__client_t *client, int call_options) { /* send callback to originating child */ int local_rc = pcmk_rc_ok; int rid = 0; uint32_t ipc_flags = crm_ipc_server_event; if (pcmk_is_set(call_options, st_opt_sync_call)) { CRM_LOG_ASSERT(client->request_id); rid = client->request_id; client->request_id = 0; ipc_flags = crm_ipc_flags_none; } local_rc = pcmk__ipc_send_xml(client, rid, notify_src, ipc_flags); if (local_rc == pcmk_rc_ok) { crm_trace("Sent response %d to client %s", rid, pcmk__client_name(client)); } else { crm_warn("%synchronous reply to client %s failed: %s", (pcmk_is_set(call_options, st_opt_sync_call)? 
"S" : "As"), pcmk__client_name(client), pcmk_rc_str(local_rc)); } } uint64_t get_stonith_flag(const char *name) { if (pcmk__str_eq(name, T_STONITH_NOTIFY_FENCE, pcmk__str_casei)) { return st_callback_notify_fence; } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_ADD, pcmk__str_casei)) { return st_callback_device_add; } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_DEL, pcmk__str_casei)) { return st_callback_device_del; } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY, pcmk__str_casei)) { return st_callback_notify_history; } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED, pcmk__str_casei)) { return st_callback_notify_history_synced; } return st_callback_unknown; } static void stonith_notify_client(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; pcmk__client_t *client = value; const char *type = NULL; CRM_CHECK(client != NULL, return); CRM_CHECK(update_msg != NULL, return); type = crm_element_value(update_msg, F_SUBTYPE); CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return); if (client->ipcs == NULL) { crm_trace("Skipping client with NULL channel"); return; } if (pcmk_is_set(client->flags, get_stonith_flag(type))) { int rc = pcmk__ipc_send_xml(client, 0, update_msg, crm_ipc_server_event); if (rc != pcmk_rc_ok) { crm_warn("%s notification of client %s failed: %s " CRM_XS " id=%.8s rc=%d", type, pcmk__client_name(client), pcmk_rc_str(rc), client->id, rc); } else { crm_trace("Sent %s notification to client %s", type, pcmk__client_name(client)); } } } void do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout) { pcmk__client_t *client = NULL; xmlNode *notify_data = NULL; if (!timeout || !call_id || !client_id) { return; } client = pcmk__find_client_by_id(client_id); if (!client) { return; } notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE); crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE); crm_xml_add(notify_data, F_STONITH_CALLID, call_id); crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout); crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id); if (client) { pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event); } free_xml(notify_data); } /*! * \internal * \brief Notify relevant IPC clients of a fencing operation result * * \param[in] type Notification type * \param[in] result Result of fencing operation (assume success if NULL) * \param[in] data If not NULL, add to notification as call data */ void fenced_send_notification(const char *type, const pcmk__action_result_t *result, xmlNode *data) { /* TODO: Standardize the contents of data */ xmlNode *update_msg = create_xml_node(NULL, "notify"); CRM_LOG_ASSERT(type != NULL); crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(update_msg, F_SUBTYPE, type); crm_xml_add(update_msg, F_STONITH_OPERATION, type); stonith__xe_set_result(update_msg, result); if (data != NULL) { add_message_xml(update_msg, F_STONITH_CALLDATA, data); } crm_trace("Notifying clients"); pcmk__foreach_ipc_client(stonith_notify_client, update_msg); free_xml(update_msg); crm_trace("Notify complete"); } /*! 
* \internal * \brief Send notifications for a configuration change to subscribed clients * * \param[in] op Notification type (STONITH_OP_DEVICE_ADD, * STONITH_OP_DEVICE_DEL, STONITH_OP_LEVEL_ADD, or * STONITH_OP_LEVEL_DEL) * \param[in] result Operation result * \param[in] desc Description of what changed * \param[in] active Current number of devices or topologies in use */ static void send_config_notification(const char *op, const pcmk__action_result_t *result, const char *desc, int active) { xmlNode *notify_data = create_xml_node(NULL, op); CRM_CHECK(notify_data != NULL, return); crm_xml_add(notify_data, F_STONITH_DEVICE, desc); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active); fenced_send_notification(op, result, notify_data); free_xml(notify_data); } /*! * \internal * \brief Send notifications for a device change to subscribed clients * * \param[in] op Notification type (STONITH_OP_DEVICE_ADD or * STONITH_OP_DEVICE_DEL) * \param[in] result Operation result * \param[in] desc ID of device that changed */ void fenced_send_device_notification(const char *op, const pcmk__action_result_t *result, const char *desc) { send_config_notification(op, result, desc, g_hash_table_size(device_list)); } /*! * \internal * \brief Send notifications for a topology level change to subscribed clients * * \param[in] op Notification type (STONITH_OP_LEVEL_ADD or * STONITH_OP_LEVEL_DEL) * \param[in] result Operation result * \param[in] desc String representation of level ([]) */ void fenced_send_level_notification(const char *op, const pcmk__action_result_t *result, const char *desc) { send_config_notification(op, result, desc, g_hash_table_size(topology)); } static void topology_remove_helper(const char *node, int level) { char *desc = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); crm_xml_add(data, F_STONITH_ORIGIN, __func__); crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); fenced_unregister_level(data, &desc, &result); fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc); pcmk__reset_result(&result); free_xml(data); free(desc); } static void remove_cib_device(xmlXPathObjectPtr xpathObj) { int max = numXpathResults(xpathObj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { const char *rsc_id = NULL; const char *standard = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match != NULL) { standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); } if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { continue; } rsc_id = crm_element_value(match, XML_ATTR_ID); stonith_device_remove(rsc_id, true); } } static void remove_topology_level(xmlNode *match) { int index = 0; char *key = NULL; CRM_CHECK(match != NULL, return); key = stonith_level_key(match, fenced_target_by_unknown); crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); topology_remove_helper(key, index); free(key); } static void add_topology_level(xmlNode *match) { char *desc = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(match != NULL, return); fenced_register_level(match, &desc, &result); fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc); pcmk__reset_result(&result); free(desc); } static void remove_fencing_topology(xmlXPathObjectPtr xpathObj) { int max = numXpathResults(xpathObj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != 
NULL); if (match && crm_element_value(match, XML_DIFF_MARKER)) { /* Deletion */ int index = 0; char *target = stonith_level_key(match, fenced_target_by_unknown); crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); if (target == NULL) { crm_err("Invalid fencing target in element %s", ID(match)); } else if (index <= 0) { crm_err("Invalid level for %s in element %s", target, ID(match)); } else { topology_remove_helper(target, index); } /* } else { Deal with modifications during the 'addition' stage */ } } } static void register_fencing_topology(xmlXPathObjectPtr xpathObj) { int max = numXpathResults(xpathObj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); remove_topology_level(match); add_topology_level(match); } } /* Fencing */ static void fencing_topology_init(void) { xmlXPathObjectPtr xpathObj = NULL; const char *xpath = "//" XML_TAG_FENCING_LEVEL; crm_trace("Full topology refresh"); free_topology_list(); init_topology_list(); /* Grab everything */ xpathObj = xpath_search(local_cib, xpath); register_fencing_topology(xpathObj); freeXpathObject(xpathObj); } #define rsc_name(x) x->clone_name?x->clone_name:x->id /*! * \internal * \brief Check whether our uname is in a resource's allowed node list * * \param[in] rsc Resource to check * * \return Pointer to node object if found, NULL otherwise */ static pe_node_t * our_node_allowed_for(const pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; if (rsc && stonith_our_uname) { g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node && strcmp(node->details->uname, stonith_our_uname) == 0) { break; } node = NULL; } } return node; } static void watchdog_device_update(void) { if (stonith_watchdog_timeout_ms > 0) { if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) && !stonith_watchdog_targets) { /* If we get here, watchdog fencing is enabled, no watchdog device is registered yet, and it is not stonith_watchdog_targets that is preventing registration */ int rc; xmlNode *xml; xml = create_device_registration_xml( STONITH_WATCHDOG_ID, st_namespace_internal, STONITH_WATCHDOG_AGENT, NULL, /* stonith_device_register will add our own name as PCMK_STONITH_HOST_LIST param so we can skip that here */ NULL); rc = stonith_device_register(xml, TRUE); free_xml(xml); if (rc != pcmk_ok) { rc = pcmk_legacy2rc(rc); exit_code = CRM_EX_FATAL; crm_crit("Cannot register watchdog pseudo fence agent: %s", pcmk_rc_str(rc)); stonith_shutdown(0); } } } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) { /* Be silent if there is no device (TODO: add a parameter to stonith_device_remove for this) */ stonith_device_remove(STONITH_WATCHDOG_ID, true); } } static void update_stonith_watchdog_timeout_ms(xmlNode *cib) { long timeout_ms = 0; xmlNode *stonith_watchdog_xml = NULL; const char *value = NULL; stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", cib, LOG_NEVER); if (stonith_watchdog_xml) { value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); } if (value) { timeout_ms = crm_get_msec(value); } if (timeout_ms < 0) { timeout_ms = pcmk__auto_watchdog_timeout(); } stonith_watchdog_timeout_ms = timeout_ms; } /*! * \internal * \brief If a resource or any of its children are STONITH devices, update their * definitions given a cluster working set.
* * \param[in,out] rsc Resource to check * \param[in,out] data_set Cluster working set with device information */ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) { pe_node_t *node = NULL; const char *value = NULL; const char *rclass = NULL; pe_node_t *parent = NULL; /* If this is a complex resource, check children rather than this resource itself. */ if(rsc->children) { GList *gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { cib_device_update(gIter->data, data_set); if(pe_rsc_is_clone(rsc)) { crm_trace("Only processing one copy of the clone %s", rsc->id); break; } } return; } /* We only care about STONITH resources. */ rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { return; } /* If this STONITH resource is disabled, remove it. */ if (pe__resource_is_disabled(rsc)) { crm_info("Device %s has been disabled", rsc->id); return; } /* if watchdog-fencing is disabled handle any watchdog-fence resource as if it was disabled */ if ((stonith_watchdog_timeout_ms <= 0) && pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { crm_info("Watchdog-fencing disabled thus handling " "device %s as disabled", rsc->id); return; } /* Check whether our node is allowed for this resource (and its parent if in a group) */ node = our_node_allowed_for(rsc); if (rsc->parent && (rsc->parent->variant == pe_group)) { parent = our_node_allowed_for(rsc->parent); } if(node == NULL) { /* Our node is disallowed, so remove the device */ GHashTableIter iter; crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { crm_trace("Available: %s = %d", pe__node_name(node), node->weight); } return; } else if(node->weight < 0 || (parent && parent->weight < 0)) { /* Our node (or its group) is disallowed by score, so remove the device */ int score = (node->weight < 0)? node->weight : parent->weight; crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, pcmk_readable_score(score)); return; } else { /* Our node is allowed, so update the device information */ int rc; xmlNode *data; GHashTable *rsc_params = NULL; GHashTableIter gIter; stonith_key_value_t *params = NULL; const char *name = NULL; const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE); const char *rsc_provides = NULL; crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight); rsc_params = pe_rsc_params(rsc, node, data_set); get_meta_attributes(rsc->meta, rsc, node, data_set); rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES); g_hash_table_iter_init(&gIter, rsc_params); while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) { if (!name || !value) { continue; } params = stonith_key_value_add(params, name, value); crm_trace(" %s=%s", name, value); } data = create_device_registration_xml(rsc_name(rsc), st_namespace_any, agent, params, rsc_provides); stonith_key_value_freeall(params, 1, 1); rc = stonith_device_register(data, TRUE); CRM_ASSERT(rc == pcmk_ok); free_xml(data); } } /*! 
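 * Editor's illustrative aside, not part of the patch: the sketch below
 * mirrors the registration path used above for both the watchdog device and
 * CIB-defined devices. The function name and parameter values are
 * hypothetical; the calls themselves are the ones used in this file.
 */
static int
example_register_device(void)
{
    stonith_key_value_t *params = NULL;
    xmlNode *xml = NULL;
    int rc;

    params = stonith_key_value_add(params, PCMK_STONITH_HOST_LIST,
                                   "node1 node2");
    xml = create_device_registration_xml("example-fence", st_namespace_any,
                                         "fence_dummy", params,
                                         NULL /* rsc_provides */);
    stonith_key_value_freeall(params, 1, 1);
    rc = stonith_device_register(xml, TRUE);
    free_xml(xml);
    return rc;
}

/*!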
* \internal * \brief Update all STONITH device definitions based on current CIB */ static void cib_devices_update(void) { GHashTableIter iter; stonith_device_t *device = NULL; crm_info("Updating devices to version %s.%s.%s", crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN), crm_element_value(local_cib, XML_ATTR_GENERATION), crm_element_value(local_cib, XML_ATTR_NUMUPDATES)); if (fenced_data_set->now != NULL) { crm_time_free(fenced_data_set->now); fenced_data_set->now = NULL; } fenced_data_set->localhost = stonith_our_uname; pcmk__schedule_actions(local_cib, data_set_flags, fenced_data_set); g_hash_table_iter_init(&iter, device_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { if (device->cib_registered) { device->dirty = TRUE; } } /* have list repopulated if cib has a watchdog-fencing-resource TODO: keep a cached list for queries happening while we are refreshing */ g_list_free_full(stonith_watchdog_targets, free); stonith_watchdog_targets = NULL; g_list_foreach(fenced_data_set->resources, (GFunc) cib_device_update, fenced_data_set); g_hash_table_iter_init(&iter, device_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) { if (device->dirty) { g_hash_table_iter_remove(&iter); } } fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it pe_reset_working_set(fenced_data_set); } static void update_cib_stonith_devices_v2(const char *event, xmlNode * msg) { xmlNode *change = NULL; char *reason = NULL; bool needs_update = FALSE; xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); for (change = pcmk__xml_first_child(patchset); change != NULL; change = pcmk__xml_next(change)) { const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); const char *shortpath = NULL; if ((op == NULL) || (strcmp(op, "move") == 0) || strstr(xpath, "/"XML_CIB_TAG_STATUS)) { continue; } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) { const char *rsc_id = NULL; char *search = NULL; char *mutable = NULL; if (strstr(xpath, XML_TAG_ATTR_SETS) || strstr(xpath, XML_TAG_META_SETS)) { needs_update = TRUE; pcmk__str_update(&reason, "(meta) attribute deleted from resource"); break; } pcmk__str_update(&mutable, xpath); rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'"); if (rsc_id != NULL) { rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'"); search = strchr(rsc_id, '\''); } if (search != NULL) { *search = 0; stonith_device_remove(rsc_id, true); /* watchdog_device_update called afterwards to fall back to implicit definition if needed */ } else { crm_warn("Ignoring malformed CIB update (resource deletion)"); } free(mutable); } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) || strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) || strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) { shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath); reason = crm_strdup_printf("%s %s", op, shortpath+1); needs_update = TRUE; break; } } if(needs_update) { crm_info("Updating device list from CIB: %s", reason); cib_devices_update(); } else { crm_trace("No updates for device list found in CIB"); } free(reason); } static void update_cib_stonith_devices_v1(const char *event, xmlNode * msg) { const char *reason = "none"; gboolean needs_update = FALSE; xmlXPathObjectPtr xpath_obj = NULL; /* process new constraints */ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION); if (numXpathResults(xpath_obj) > 0) { int max = numXpathResults(xpath_obj), lpc = 0; 
/* Safest and simplest to always recompute */ needs_update = TRUE; reason = "new location constraint"; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpath_obj, lpc); crm_log_xml_trace(match, "new constraint"); } } freeXpathObject(xpath_obj); /* process deletions */ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE); if (numXpathResults(xpath_obj) > 0) { remove_cib_device(xpath_obj); } freeXpathObject(xpath_obj); /* process additions */ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE); if (numXpathResults(xpath_obj) > 0) { int max = numXpathResults(xpath_obj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { const char *rsc_id = NULL; const char *standard = NULL; xmlNode *match = getXpathResult(xpath_obj, lpc); rsc_id = crm_element_value(match, XML_ATTR_ID); standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { continue; } crm_trace("Fencing resource %s was added or modified", rsc_id); reason = "new resource"; needs_update = TRUE; } } freeXpathObject(xpath_obj); if(needs_update) { crm_info("Updating device list from CIB: %s", reason); cib_devices_update(); } } static void update_cib_stonith_devices(const char *event, xmlNode * msg) { int format = 1; xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); CRM_ASSERT(patchset); crm_element_value_int(patchset, "format", &format); switch(format) { case 1: update_cib_stonith_devices_v1(event, msg); break; case 2: update_cib_stonith_devices_v2(event, msg); break; default: crm_warn("Unknown patch format: %d", format); } } /*! * \internal * \brief Check whether a node has a specific attribute name/value * * \param[in] node Name of node to check * \param[in] name Name of an attribute to look for * \param[in] value The value the named attribute needs to be set to in order to be considered a match * * \return TRUE if the locally cached CIB has the specified node attribute */ gboolean node_has_attr(const char *node, const char *name, const char *value) { GString *xpath = NULL; xmlNode *match; CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL) && (value != NULL), return FALSE); /* Search for the node's attributes in the CIB. While the schema allows * multiple sets of instance attributes, and allows instance attributes to * use id-ref to reference values elsewhere, that is intended for resources, * so we ignore that here. */ xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS "/" XML_CIB_TAG_NVPAIR "[@" XML_NVPAIR_ATTR_NAME "='", name, "' " "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL); match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER); g_string_free(xpath, TRUE); return (match != NULL); } /*! 
* \internal * \brief Check whether a node does watchdog-fencing * * \param[in] node Name of node to check * * \return TRUE if the node is found in stonith_watchdog_targets, or if * stonith_watchdog_targets is empty (indicating that all nodes do * watchdog-fencing) */ gboolean node_does_watchdog_fencing(const char *node) { return ((stonith_watchdog_targets == NULL) || pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei)); } static void update_fencing_topology(const char *event, xmlNode * msg) { int format = 1; const char *xpath; xmlXPathObjectPtr xpathObj = NULL; xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); CRM_ASSERT(patchset); crm_element_value_int(patchset, "format", &format); if(format == 1) { /* Process deletions (only) */ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); remove_fencing_topology(xpathObj); freeXpathObject(xpathObj); /* Process additions and changes */ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); register_fencing_topology(xpathObj); freeXpathObject(xpathObj); } else if(format == 2) { xmlNode *change = NULL; int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; xml_patch_versions(patchset, add, del); for (change = pcmk__xml_first_child(patchset); change != NULL; change = pcmk__xml_next(change)) { const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); if(op == NULL) { continue; } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) { /* Change to a specific entry */ crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); if(strcmp(op, "move") == 0) { continue; } else if(strcmp(op, "create") == 0) { add_topology_level(change->children); } else if(strcmp(op, "modify") == 0) { xmlNode *match = first_named_child(change, XML_DIFF_RESULT); if(match) { remove_topology_level(match->children); add_topology_level(match->children); } } else if(strcmp(op, "delete") == 0) { /* Nuclear option, all we have is the path and an id...
not enough to remove a specific entry */ crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) { /* Change to the topology in general */ crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) { /* Changes to the whole config section, possibly including the topology as a whole */ if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) { crm_trace("Nothing for us in %s operation %d.%d.%d for %s.", op, add[0], add[1], add[2], xpath); } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) { crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } } else { crm_trace("Nothing for us in %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); } } } else { crm_warn("Unknown patch format: %d", format); } } static bool have_cib_devices = FALSE; static void update_cib_cache_cb(const char *event, xmlNode * msg) { int rc = pcmk_ok; long timeout_ms_saved = stonith_watchdog_timeout_ms; bool need_full_refresh = false; if(!have_cib_devices) { crm_trace("Skipping updates until we get a full dump"); return; } else if(msg == NULL) { crm_trace("Missing %s update", event); return; } /* Maintain a local copy of the CIB so that we have full access * to device definitions, location constraints, and node attributes */ if (local_cib != NULL) { int rc = pcmk_ok; xmlNode *patchset = NULL; crm_element_value_int(msg, F_CIB_RC, &rc); if (rc != pcmk_ok) { return; } patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); pcmk__xml_log_patchset(LOG_TRACE, patchset); rc = xml_apply_patchset(local_cib, patchset, TRUE); switch (rc) { case pcmk_ok: case -pcmk_err_old_data: break; case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(local_cib); local_cib = NULL; break; default: crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(local_cib); local_cib = NULL; } } if (local_cib == NULL) { crm_trace("Re-requesting full CIB"); rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call); if(rc != pcmk_ok) { crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc); return; } CRM_ASSERT(local_cib != NULL); need_full_refresh = true; } pcmk__refresh_node_caches_from_cib(local_cib); update_stonith_watchdog_timeout_ms(local_cib); if (timeout_ms_saved != stonith_watchdog_timeout_ms) { need_full_refresh = true; } if (need_full_refresh) { fencing_topology_init(); cib_devices_update(); } else { // Partial refresh update_fencing_topology(event, msg); update_cib_stonith_devices(event, msg); } watchdog_device_update(); } static void init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { crm_info("Updating device list from CIB"); have_cib_devices = TRUE; local_cib = copy_xml(output); pcmk__refresh_node_caches_from_cib(local_cib); update_stonith_watchdog_timeout_ms(local_cib); fencing_topology_init(); cib_devices_update(); watchdog_device_update(); } static void stonith_shutdown(int nsig) { crm_info("Terminating with %d clients", pcmk__ipc_client_count()); stonith_shutdown_flag = TRUE; if (mainloop != NULL &&
g_main_loop_is_running(mainloop)) { g_main_loop_quit(mainloop); } } static void cib_connection_destroy(gpointer user_data) { if (stonith_shutdown_flag) { crm_info("Connection to the CIB manager closed"); return; } else { crm_crit("Lost connection to the CIB manager, shutting down"); } if (cib_api) { cib_api->cmds->signoff(cib_api); } stonith_shutdown(0); } static void stonith_cleanup(void) { if (cib_api) { cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb); cib_api->cmds->signoff(cib_api); } if (ipcs) { qb_ipcs_destroy(ipcs); } crm_peer_destroy(); pcmk__client_cleanup(); free_stonith_remote_op_list(); free_topology_list(); free_device_list(); free_metadata_cache(); fenced_unregister_handlers(); free(stonith_our_uname); stonith_our_uname = NULL; free_xml(local_cib); local_cib = NULL; } static gboolean stand_alone_cpg_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { stand_alone = FALSE; options.no_cib_connect = true; return TRUE; } static void setup_cib(void) { int rc, retries = 0; cib_api = cib_new(); if (cib_api == NULL) { crm_err("No connection to the CIB manager"); return; } do { sleep(retries); rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command); } while (rc == -ENOTCONN && ++retries < 5); if (rc != pcmk_ok) { crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc); } else if (pcmk_ok != cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) { crm_err("Could not set CIB notification callback"); } else { rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local); cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb", init_cib_cache_cb); cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); crm_info("Watching for fencing topology changes"); } } struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = st_ipc_accept, .connection_created = NULL, .msg_process = st_ipc_dispatch, .connection_closed = st_ipc_closed, .connection_destroyed = st_ipc_destroy }; /*! * \internal * \brief Callback for peer status changes * * \param[in] type What changed * \param[in] node What peer had the change * \param[in] data Previous value of what changed */ static void st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) { if ((type != crm_status_processes) && !pcmk_is_set(node->flags, crm_remote_node)) { /* * This is a hack until we can send to a nodeid and/or we fix node name lookups * These messages are ignored in stonith_peer_callback() */ xmlNode *query = create_xml_node(NULL, "stonith_command"); crm_xml_add(query, F_XML_TAGNAME, "stonith_command"); crm_xml_add(query, F_TYPE, T_STONITH_NG); crm_xml_add(query, F_STONITH_OPERATION, "poke"); crm_debug("Broadcasting our uname because of node %u", node->id); send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE); free_xml(query); } } static pcmk__cluster_option_t fencer_options[] = { /* name, old name, type, allowed values, * default value, validator, * short description, * long description */ { PCMK_STONITH_HOST_ARGUMENT, NULL, "string", NULL, "port", NULL, N_("Advanced use only: An alternate parameter to supply instead of 'port'"), N_("some devices do not support the " "standard 'port' parameter or may provide additional ones. Use " "this to specify an alternate, device-specific, parameter " "that should indicate the machine to be fenced. 
A value of " "none can be used to tell the cluster not to supply any " "additional parameters.") }, { PCMK_STONITH_HOST_MAP,NULL, "string", NULL, "", NULL, N_("A mapping of host names to port numbers for devices that do not support host names."), N_("E.g., node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.") }, { PCMK_STONITH_HOST_LIST,NULL, "string", NULL, "", NULL, N_("A list of machines controlled by " "this device (optional unless pcmk_host_check=static-list)"), N_("For example, node1,node2,node3.") }, { PCMK_STONITH_HOST_CHECK,NULL, "string", NULL, "dynamic-list", NULL, N_("How to determine which machines are controlled by the device."), N_("Allowed values: dynamic-list " "(query the device via the 'list' command), static-list " "(check the pcmk_host_list attribute), status " "(query the device via the 'status' command), " "none (assume every device can fence every " "machine)") }, { PCMK_STONITH_DELAY_MAX,NULL, "time", NULL, "0s", NULL, N_("Enable a delay of no more than the time specified before executing fencing actions."), N_("Enable a delay of no more than the " "time specified before executing fencing actions. Pacemaker " "derives the overall delay by taking the value of " "pcmk_delay_base and adding a random delay value such " "that the sum is kept below this maximum.") }, { PCMK_STONITH_DELAY_BASE,NULL, "string", NULL, "0s", NULL, N_("Enable a base delay for " "fencing actions and specify base delay value."), N_("This enables a static delay for " "fencing actions, which can help avoid \"death matches\" where " "two nodes try to fence each other at the same time. If " "pcmk_delay_max is also used, a random delay will be " "added such that the total delay is kept below that value. " "This can be set to a single time value to apply to any node " "targeted by this device (useful if a separate device is " "configured for each target), or to a node map (for example, " "\"node1:1s;node2:5\") to set a different value per target.") }, { PCMK_STONITH_ACTION_LIMIT,NULL, "integer", NULL, "1", NULL, N_("The maximum number of actions that can be performed in parallel on this device"), N_("Cluster property concurrent-fencing=true needs to be configured first. " "Then use this to specify the maximum number of actions that can be performed in parallel on this device. -1 is unlimited.") }, { "pcmk_reboot_action",NULL, "string", NULL, "reboot", NULL, N_("Advanced use only: An alternate command to run instead of 'reboot'"), N_("Some devices do not support the standard commands or may provide additional ones.\n" "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.") }, { "pcmk_reboot_timeout",NULL, "time", NULL, "60s", NULL, N_("Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout"), N_("Some devices need much more/less time to complete than normal. " "Use this to specify an alternate, device-specific, timeout for \'reboot\' actions.") }, { "pcmk_reboot_retries",NULL, "integer", NULL, "2", NULL, N_("Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period"), N_("Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
" Use this option to alter the number of times Pacemaker retries \'reboot\' actions before giving up.") }, { "pcmk_off_action",NULL, "string", NULL, "off", NULL, N_("Advanced use only: An alternate command to run instead of \'off\'"), N_("Some devices do not support the standard commands or may provide additional ones. " "Use this to specify an alternate, device-specific, command that implements the \'off\' action.") }, { "pcmk_off_timeout",NULL, "time", NULL, "60s", NULL, N_("Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout"), N_("Some devices need much more/less time to complete than normal. " "Use this to specify an alternate, device-specific, timeout for \'off\' actions.") }, { "pcmk_off_retries",NULL, "integer", NULL, "2", NULL, N_("Advanced use only: The maximum number of times to retry the 'off' command within the timeout period"), N_("Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." " Use this option to alter the number of times Pacemaker retries \'off\' actions before giving up.") }, { "pcmk_on_action",NULL, "string", NULL, "on", NULL, N_("Advanced use only: An alternate command to run instead of 'on'"), N_("Some devices do not support the standard commands or may provide additional ones. " "Use this to specify an alternate, device-specific, command that implements the \'on\' action.") }, { "pcmk_on_timeout",NULL, "time", NULL, "60s", NULL, N_("Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout"), N_("Some devices need much more/less time to complete than normal. " "Use this to specify an alternate, device-specific, timeout for \'on\' actions.") }, { "pcmk_on_retries",NULL, "integer", NULL, "2", NULL, N_("Advanced use only: The maximum number of times to retry the 'on' command within the timeout period"), N_("Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." " Use this option to alter the number of times Pacemaker retries \'on\' actions before giving up.") }, { "pcmk_list_action",NULL, "string", NULL, "list", NULL, N_("Advanced use only: An alternate command to run instead of \'list\'"), N_("Some devices do not support the standard commands or may provide additional ones. " "Use this to specify an alternate, device-specific, command that implements the \'list\' action.") }, { "pcmk_list_timeout",NULL, "time", NULL, "60s", NULL, N_("Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout"), N_("Some devices need much more/less time to complete than normal. " "Use this to specify an alternate, device-specific, timeout for \'list\' actions.") }, { "pcmk_list_retries",NULL, "integer", NULL, "2", NULL, N_("Advanced use only: The maximum number of times to retry the \'list\' command within the timeout period"), N_("Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
" Use this option to alter the number of times Pacemaker retries \'list\' actions before giving up.") }, { "pcmk_monitor_action",NULL, "string", NULL, "monitor", NULL, N_("Advanced use only: An alternate command to run instead of \'monitor\'"), N_("Some devices do not support the standard commands or may provide additional ones. " "Use this to specify an alternate, device-specific, command that implements the \'monitor\' action.") }, { "pcmk_monitor_timeout",NULL, "time", NULL, "60s", NULL, N_("Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout"), N_("Some devices need much more/less time to complete than normal.\n" "Use this to specify an alternate, device-specific, timeout for \'monitor\' actions.") }, { "pcmk_monitor_retries",NULL, "integer", NULL, "2", NULL, N_("Advanced use only: The maximum number of times to retry the \'monitor\' command within the timeout period"), N_("Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." " Use this option to alter the number of times Pacemaker retries \'monitor\' actions before giving up.") }, { "pcmk_status_action",NULL, "string", NULL, "status", NULL, N_("Advanced use only: An alternate command to run instead of \'status\'"), N_("Some devices do not support the standard commands or may provide additional ones. " "Use this to specify an alternate, device-specific, command that implements the \'status\' action.") }, { "pcmk_status_timeout",NULL, "time", NULL, "60s", NULL, N_("Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout"), N_("Some devices need much more/less time to complete than normal. " "Use this to specify an alternate, device-specific, timeout for \'status\' actions.") }, { "pcmk_status_retries",NULL, "integer", NULL, "2", NULL, N_("Advanced use only: The maximum number of times to retry the \'status\' command within the timeout period"), N_("Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining."
" Use this option to alter the number of times Pacemaker retries \'status\' actions before giving up.") }, }; void fencer_metadata(void) { const char *desc_short = N_("Instance attributes available for all " "\"stonith\"-class resources"); const char *desc_long = N_("Instance attributes available for all \"stonith\"-" "class resources and used by Pacemaker's fence " "daemon, formerly known as stonithd"); gchar *s = pcmk__format_option_metadata("pacemaker-fenced", desc_short, desc_long, fencer_options, PCMK__NELEM(fencer_options)); printf("%s", s); g_free(s); } static GOptionEntry entries[] = { { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone, "Deprecated (will be removed in a future release)", NULL }, { "stand-alone-w-cpg", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, stand_alone_cpg_cb, "Intended for use in regression testing only", NULL }, { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY, &options.log_files, "Send logs to the additional named logfile", NULL }, { NULL } }; static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; context = pcmk__build_arg_context(args, "text (default), xml", group, "[metadata]"); pcmk__add_main_args(context, entries); return context; } int main(int argc, char **argv) { int rc = pcmk_rc_ok; crm_cluster_t *cluster = NULL; crm_ipc_t *old_instance = NULL; GError *error = NULL; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, "l"); GOptionContext *context = build_arg_context(args, &output_group); crm_log_preinit(NULL, argc, argv); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } if (args->version) { out->version(out, false); goto done; } if ((g_strv_length(processed_args) >= 2) && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) { fencer_metadata(); goto done; } // Open additional log files - if (options.log_files != NULL) { - for (gchar **fname = options.log_files; *fname != NULL; fname++) { - rc = pcmk__add_logfile(*fname); - - if (rc != pcmk_rc_ok) { - out->err(out, "Logging to %s is disabled: %s", - *fname, pcmk_rc_str(rc)); - } - } - } + pcmk__add_logfiles(options.log_files, out); crm_log_init(NULL, LOG_INFO + args->verbosity, TRUE, (args->verbosity > 0), argc, argv, FALSE); crm_notice("Starting Pacemaker fencer"); old_instance = crm_ipc_new("stonith-ng", 0); if (old_instance == NULL) { /* crm_ipc_new() will have already logged an error message with * crm_err() */ exit_code = CRM_EX_FATAL; goto done; } if (crm_ipc_connect(old_instance)) { // IPC endpoint already up crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_err("pacemaker-fenced is already active, aborting startup"); goto done; } else { // Not up or not authentic, we'll proceed either way crm_ipc_destroy(old_instance); old_instance = NULL; } mainloop_add_signal(SIGTERM, stonith_shutdown); crm_peer_init(); fenced_data_set = pe_new_working_set(); CRM_ASSERT(fenced_data_set != NULL); cluster = pcmk_cluster_new(); if (!stand_alone) { if (is_corosync_cluster()) { #if SUPPORT_COROSYNC cluster->destroy = 
stonith_peer_cs_destroy; cluster->cpg.cpg_deliver_fn = stonith_peer_ais_callback; cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; #endif } crm_set_status_callback(&st_peer_update_callback); if (crm_cluster_connect(cluster) == FALSE) { exit_code = CRM_EX_FATAL; crm_crit("Cannot sign in to the cluster... terminating"); goto done; } pcmk__str_update(&stonith_our_uname, cluster->uname); if (!options.no_cib_connect) { setup_cib(); } } else { pcmk__str_update(&stonith_our_uname, "localhost"); crm_warn("Stand-alone mode is deprecated and will be removed " "in a future release"); } init_device_list(); init_topology_list(); pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks); if (pcmk__log_output_new(&logger_out) != pcmk_rc_ok) { exit_code = CRM_EX_FATAL; goto done; } pe__register_messages(logger_out); pcmk__register_lib_messages(logger_out); pcmk__output_set_log_level(logger_out, LOG_TRACE); fenced_data_set->priv = logger_out; // Create the mainloop and run it... mainloop = g_main_loop_new(NULL, FALSE); crm_notice("Pacemaker fencer successfully started and accepting connections"); g_main_loop_run(mainloop); done: g_strfreev(processed_args); pcmk__free_arg_context(context); g_strfreev(options.log_files); stonith_cleanup(); pcmk_cluster_free(cluster); pe_free_working_set(fenced_data_set); pcmk__output_and_clear_error(error, out); if (logger_out != NULL) { logger_out->finish(logger_out, exit_code, true, NULL); pcmk__output_free(logger_out); } if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); crm_exit(exit_code); } diff --git a/include/crm/common/logging_internal.h b/include/crm/common/logging_internal.h index d1c30fb4b9..479dcab8b4 100644 --- a/include/crm/common/logging_internal.h +++ b/include/crm/common/logging_internal.h @@ -1,91 +1,95 @@ /* * Copyright 2015-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #ifdef __cplusplus extern "C" { #endif #ifndef PCMK__LOGGING_INTERNAL_H # define PCMK__LOGGING_INTERNAL_H +# include + # include +# include /*! * \internal * \brief Log a configuration error * * \param[in] fmt printf(3)-style format string * \param[in] ... Arguments for format string */ # define pcmk__config_err(fmt...) do { \ crm_config_error = TRUE; \ crm_err(fmt); \ } while (0) /*! * \internal * \brief Log a configuration warning * * \param[in] fmt printf(3)-style format string * \param[in] ... Arguments for format string */ # define pcmk__config_warn(fmt...) do { \ crm_config_warning = TRUE; \ crm_warn(fmt); \ } while (0) /*! * \internal * \brief Execute code depending on whether trace logging is enabled * * This is similar to \p do_crm_log_unlikely() except instead of logging, it * selects one of two code blocks to execute. * * \param[in] if_action Code block to execute if trace logging is enabled * \param[in] else_action Code block to execute if trace logging is not enabled * * \note Neither \p if_action nor \p else_action can contain a \p break or * \p continue statement. 
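 *
 * \note A minimal usage sketch (expensive_debug_dump() is a hypothetical
 *       helper, not part of this API):
 * \code
 * pcmk__if_tracing(
 *     {
 *         // Pay for the expensive dump only when tracing is enabled
 *         char *dump = expensive_debug_dump(object);
 *
 *         crm_trace("Full object state: %s", dump);
 *         free(dump);
 *     },
 *     {}  // Nothing to do otherwise
 * );
 * \endcode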
*/ # define pcmk__if_tracing(if_action, else_action) do { \ static struct qb_log_callsite *trace_cs = NULL; \ \ if (trace_cs == NULL) { \ trace_cs = qb_log_callsite_get(__func__, __FILE__, \ "if_tracing", LOG_TRACE, \ __LINE__, crm_trace_nonlog); \ } \ if (crm_is_callsite_active(trace_cs, LOG_TRACE, \ crm_trace_nonlog)) { \ if_action; \ } else { \ else_action; \ } \ } while (0) /*! * \internal * \brief Initialize logging for command line tools * * \param[in] name The name of the program * \param[in] verbosity How verbose to be in logging * * \note \p verbosity is not the same as the logging level (LOG_ERR, etc.). */ void pcmk__cli_init_logging(const char *name, unsigned int verbosity); int pcmk__add_logfile(const char *filename); +void pcmk__add_logfiles(gchar **log_files, pcmk__output_t *out); void pcmk__free_common_logger(void); #ifdef __cplusplus } #endif #endif diff --git a/lib/common/logging.c b/lib/common/logging.c index 6b57ecd14e..dded87307a 100644 --- a/lib/common/logging.c +++ b/lib/common/logging.c @@ -1,1167 +1,1192 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Use high-resolution (millisecond) timestamps if libqb supports them #ifdef QB_FEATURE_LOG_HIRES_TIMESTAMPS #define TIMESTAMP_FORMAT_SPEC "%%T" typedef struct timespec *log_time_t; #else #define TIMESTAMP_FORMAT_SPEC "%%t" typedef time_t log_time_t; #endif unsigned int crm_log_level = LOG_INFO; unsigned int crm_trace_nonlog = 0; bool pcmk__is_daemon = false; char *pcmk__our_nodename = NULL; static unsigned int crm_log_priority = LOG_NOTICE; static GLogFunc glib_log_default = NULL; static pcmk__output_t *logger_out = NULL; static gboolean crm_tracing_enabled(void); static void crm_glib_handler(const gchar * log_domain, GLogLevelFlags flags, const gchar * message, gpointer user_data) { int log_level = LOG_WARNING; GLogLevelFlags msg_level = (flags & G_LOG_LEVEL_MASK); static struct qb_log_callsite *glib_cs = NULL; if (glib_cs == NULL) { glib_cs = qb_log_callsite_get(__func__, __FILE__, "glib-handler", LOG_DEBUG, __LINE__, crm_trace_nonlog); } switch (msg_level) { case G_LOG_LEVEL_CRITICAL: log_level = LOG_CRIT; if (!crm_is_callsite_active(glib_cs, LOG_DEBUG, crm_trace_nonlog)) { /* log and record how we got here */ crm_abort(__FILE__, __func__, __LINE__, message, TRUE, TRUE); } break; case G_LOG_LEVEL_ERROR: log_level = LOG_ERR; break; case G_LOG_LEVEL_MESSAGE: log_level = LOG_NOTICE; break; case G_LOG_LEVEL_INFO: log_level = LOG_INFO; break; case G_LOG_LEVEL_DEBUG: log_level = LOG_DEBUG; break; case G_LOG_LEVEL_WARNING: case G_LOG_FLAG_RECURSION: case G_LOG_FLAG_FATAL: case G_LOG_LEVEL_MASK: log_level = LOG_WARNING; break; } do_crm_log(log_level, "%s: %s", log_domain, message); } #ifndef NAME_MAX # define NAME_MAX 256 #endif /*! * \internal * \brief Write out a blackbox (enabling blackboxes if needed) * * \param[in] nsig Signal number that was received * * \note This is a true signal handler, and so must be async-safe. 
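 *
 * \note A registration sketch; crm_control_blackbox() installs this handler
 *       for fatal signals in exactly this way:
 * \code
 * crm_signal_handler(SIGSEGV, crm_trigger_blackbox);
 * \endcode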
*/ static void crm_trigger_blackbox(int nsig) { if(nsig == SIGTRAP) { /* Turn it on if it wasn't already */ crm_enable_blackbox(nsig); } crm_write_blackbox(nsig, NULL); } void crm_log_deinit(void) { if (glib_log_default != NULL) { g_log_set_default_handler(glib_log_default, NULL); } } #define FMT_MAX 256 /*! * \internal * \brief Set the log format string based on the passed-in method * * \param[in] method The detail level of the log output * \param[in] daemon The daemon ID included in error messages * \param[in] use_pid Cached result of getpid() call, for efficiency * \param[in] use_nodename Cached result of uname() call, for efficiency * */ /* XXX __attribute__((nonnull)) for use_nodename parameter */ static void set_format_string(int method, const char *daemon, pid_t use_pid, const char *use_nodename) { if (method == QB_LOG_SYSLOG) { // The system log gets a simplified, user-friendly format crm_extended_logging(method, QB_FALSE); qb_log_format_set(method, "%g %p: %b"); } else { // Everything else gets more detail, for advanced troubleshooting int offset = 0; char fmt[FMT_MAX]; if (method > QB_LOG_STDERR) { // If logging to file, prefix with timestamp, node name, daemon ID offset += snprintf(fmt + offset, FMT_MAX - offset, TIMESTAMP_FORMAT_SPEC " %s %-20s[%lu] ", use_nodename, daemon, (unsigned long) use_pid); } // Add function name (in parentheses) offset += snprintf(fmt + offset, FMT_MAX - offset, "(%%n"); if (crm_tracing_enabled()) { // When tracing, add file and line number offset += snprintf(fmt + offset, FMT_MAX - offset, "@%%f:%%l"); } offset += snprintf(fmt + offset, FMT_MAX - offset, ")"); // Add tag (if any), severity, and actual message offset += snprintf(fmt + offset, FMT_MAX - offset, " %%g\t%%p: %%b"); CRM_LOG_ASSERT(offset > 0); qb_log_format_set(method, fmt); } } #define DEFAULT_LOG_FILE CRM_LOG_DIR "/pacemaker.log" static bool logfile_disabled(const char *filename) { return pcmk__str_eq(filename, PCMK__VALUE_NONE, pcmk__str_casei) || pcmk__str_eq(filename, "/dev/null", pcmk__str_none); } /*! 
* \internal * \brief Fix log file ownership if group is wrong or doesn't have access * * \param[in] filename Log file name (for logging only) * \param[in] logfd Log file descriptor * * \return Standard Pacemaker return code */ static int chown_logfile(const char *filename, int logfd) { uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; struct stat st; int rc; // Get the log file's current ownership and permissions if (fstat(logfd, &st) < 0) { return errno; } // Any other errors don't prevent file from being used as log rc = pcmk_daemon_user(&pcmk_uid, &pcmk_gid); if (rc != pcmk_ok) { rc = pcmk_legacy2rc(rc); crm_warn("Not changing '%s' ownership because user information " "unavailable: %s", filename, pcmk_rc_str(rc)); return pcmk_rc_ok; } if ((st.st_gid == pcmk_gid) && ((st.st_mode & S_IRWXG) == (S_IRGRP|S_IWGRP))) { return pcmk_rc_ok; } if (fchown(logfd, pcmk_uid, pcmk_gid) < 0) { crm_warn("Couldn't change '%s' ownership to user %s gid %d: %s", filename, CRM_DAEMON_USER, pcmk_gid, strerror(errno)); } return pcmk_rc_ok; } // Reset log file permissions (using environment variable if set) static void chmod_logfile(const char *filename, int logfd) { const char *modestr = getenv("PCMK_logfile_mode"); mode_t filemode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; if (modestr != NULL) { long filemode_l = strtol(modestr, NULL, 8); if ((filemode_l != LONG_MIN) && (filemode_l != LONG_MAX)) { filemode = (mode_t) filemode_l; } } if ((filemode != 0) && (fchmod(logfd, filemode) < 0)) { crm_warn("Couldn't change '%s' mode to %04o: %s", filename, filemode, strerror(errno)); } } // If we're root, correct a log file's permissions if needed static int set_logfile_permissions(const char *filename, FILE *logfile) { if (geteuid() == 0) { int logfd = fileno(logfile); int rc = chown_logfile(filename, logfd); if (rc != pcmk_rc_ok) { return rc; } chmod_logfile(filename, logfd); } return pcmk_rc_ok; } // Enable libqb logging to a new log file static void enable_logfile(int fd) { qb_log_ctl(fd, QB_LOG_CONF_ENABLED, QB_TRUE); #if 0 qb_log_ctl(fd, QB_LOG_CONF_FILE_SYNC, 1); // Turn on synchronous writes #endif #ifdef HAVE_qb_log_conf_QB_LOG_CONF_MAX_LINE_LEN // Longer than default, for logging long XML lines qb_log_ctl(fd, QB_LOG_CONF_MAX_LINE_LEN, 800); #endif crm_update_callsites(); } static inline void disable_logfile(int fd) { qb_log_ctl(fd, QB_LOG_CONF_ENABLED, QB_FALSE); } static void setenv_logfile(const char *filename) { // Some resource agents will log only if environment variable is set if (pcmk__env_option(PCMK__ENV_LOGFILE) == NULL) { pcmk__set_env_option(PCMK__ENV_LOGFILE, filename); } } /*! * \brief Add a file to be used as a Pacemaker detail log * * \param[in] filename Name of log file to use * * \return Standard Pacemaker return code */ int pcmk__add_logfile(const char *filename) { /* No log messages from this function will be logged to the new log! * If another target such as syslog has already been added, the messages * should show up there. 
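 *
 * A usage sketch (error handling elided; NULL selects the default detail
 * log, CRM_LOG_DIR "/pacemaker.log"):
 *
 *     int rc = pcmk__add_logfile(NULL);
 *
 *     if (rc != pcmk_rc_ok) {
 *         crm_warn("Could not add default detail log");
 *     }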
*/ int fd = 0; int rc = pcmk_rc_ok; FILE *logfile = NULL; bool is_default = false; static int default_fd = -1; static bool have_logfile = false; // Use default if caller didn't specify (and we don't already have one) if (filename == NULL) { if (have_logfile) { return pcmk_rc_ok; } filename = DEFAULT_LOG_FILE; } // If the user doesn't want logging, we're done if (logfile_disabled(filename)) { return pcmk_rc_ok; } // If the caller wants the default and we already have it, we're done is_default = pcmk__str_eq(filename, DEFAULT_LOG_FILE, pcmk__str_none); if (is_default && (default_fd >= 0)) { return pcmk_rc_ok; } // Check whether we have write access to the file logfile = fopen(filename, "a"); if (logfile == NULL) { rc = errno; crm_warn("Logging to '%s' is disabled: %s " CRM_XS " uid=%u gid=%u", filename, strerror(rc), geteuid(), getegid()); return rc; } rc = set_logfile_permissions(filename, logfile); if (rc != pcmk_rc_ok) { crm_warn("Logging to '%s' is disabled: %s " CRM_XS " permissions", filename, strerror(rc)); fclose(logfile); return rc; } // Close and reopen as libqb logging target fclose(logfile); fd = qb_log_file_open(filename); if (fd < 0) { crm_warn("Logging to '%s' is disabled: %s " CRM_XS " qb_log_file_open", filename, strerror(-fd)); return -fd; // == +errno } if (is_default) { default_fd = fd; setenv_logfile(filename); } else if (default_fd >= 0) { crm_notice("Switching logging to %s", filename); disable_logfile(default_fd); } crm_notice("Additional logging available in %s", filename); enable_logfile(fd); have_logfile = true; return pcmk_rc_ok; } +/*! + * \brief Add multiple additional log files + * + * \param[in] log_files Array of log files to add + * \param[in] out Output object to use for error reporting + */ +void +pcmk__add_logfiles(gchar **log_files, pcmk__output_t *out) +{ + if (log_files == NULL) { + return; + } + + for (gchar **fname = log_files; *fname != NULL; fname++) { + int rc = pcmk__add_logfile(*fname); + + if (rc != pcmk_rc_ok) { + out->err(out, "Logging to %s is disabled: %s", + *fname, pcmk_rc_str(rc)); + } + } +} + static int blackbox_trigger = 0; static volatile char *blackbox_file_prefix = NULL; static void blackbox_logger(int32_t t, struct qb_log_callsite *cs, log_time_t timestamp, const char *msg) { if(cs && cs->priority < LOG_ERR) { crm_write_blackbox(SIGTRAP, cs); /* Bypass the over-dumping logic */ } else { crm_write_blackbox(0, cs); } } static void crm_control_blackbox(int nsig, bool enable) { int lpc = 0; if (blackbox_file_prefix == NULL) { pid_t pid = getpid(); blackbox_file_prefix = crm_strdup_printf("%s/%s-%lu", CRM_BLACKBOX_DIR, crm_system_name, (unsigned long) pid); } if (enable && qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_SIZE, 5 * 1024 * 1024); /* Any size change drops existing entries */ qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_TRUE); /* Setting the size seems to disable it */ /* Enable synchronous logging */ for (lpc = QB_LOG_BLACKBOX; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_FILE_SYNC, QB_TRUE); } crm_notice("Initiated blackbox recorder: %s", blackbox_file_prefix); /* Save to disk on abnormal termination */ crm_signal_handler(SIGSEGV, crm_trigger_blackbox); crm_signal_handler(SIGABRT, crm_trigger_blackbox); crm_signal_handler(SIGILL, crm_trigger_blackbox); crm_signal_handler(SIGBUS, crm_trigger_blackbox); crm_signal_handler(SIGFPE, crm_trigger_blackbox); crm_update_callsites();
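        /* Register a custom libqb log target whose callback (blackbox_logger())
         * writes the blackbox out whenever an error-level or more severe
         * message is logged (see crm_log_filter_source(), which routes
         * qualifying callsites to this target)
         */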
blackbox_trigger = qb_log_custom_open(blackbox_logger, NULL, NULL, NULL); qb_log_ctl(blackbox_trigger, QB_LOG_CONF_ENABLED, QB_TRUE); crm_trace("Trigger: %d is %d %d", blackbox_trigger, qb_log_ctl(blackbox_trigger, QB_LOG_CONF_STATE_GET, 0), QB_LOG_STATE_ENABLED); crm_update_callsites(); } else if (!enable && qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_STATE_GET, 0) == QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); /* Disable synchronous logging again when the blackbox is disabled */ for (lpc = QB_LOG_BLACKBOX; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_FILE_SYNC, QB_FALSE); } } } void crm_enable_blackbox(int nsig) { crm_control_blackbox(nsig, TRUE); } void crm_disable_blackbox(int nsig) { crm_control_blackbox(nsig, FALSE); } /*! * \internal * \brief Write out a blackbox, if blackboxes are enabled * * \param[in] nsig Signal that was received * \param[in] cs libqb callsite * * \note This may be called via a true signal handler and so must be async-safe. * @TODO actually make this async-safe */ void crm_write_blackbox(int nsig, const struct qb_log_callsite *cs) { static volatile int counter = 1; static volatile time_t last = 0; char buffer[NAME_MAX]; time_t now = time(NULL); if (blackbox_file_prefix == NULL) { return; } switch (nsig) { case 0: case SIGTRAP: /* The graceful case - such as assertion failure or user request */ if (nsig == 0 && now == last) { /* Prevent over-dumping */ return; } snprintf(buffer, NAME_MAX, "%s.%d", blackbox_file_prefix, counter++); if (nsig == SIGTRAP) { crm_notice("Blackbox dump requested, please see %s for contents", buffer); } else if (cs) { syslog(LOG_NOTICE, "Problem detected at %s:%d (%s), please see %s for additional details", cs->function, cs->lineno, cs->filename, buffer); } else { crm_notice("Problem detected, please see %s for additional details", buffer); } last = now; qb_log_blackbox_write_to_file(buffer); /* Flush the existing contents * A size change would also work */ qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_TRUE); break; default: /* Do as little as possible, just try to get what we have out * We logged the filename when the blackbox was enabled */ crm_signal_handler(nsig, SIG_DFL); qb_log_blackbox_write_to_file((const char *)blackbox_file_prefix); qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); raise(nsig); break; } } static const char * crm_quark_to_string(uint32_t tag) { const char *text = g_quark_to_string(tag); if (text) { return text; } return ""; } static void crm_log_filter_source(int source, const char *trace_files, const char *trace_fns, const char *trace_fmts, const char *trace_tags, const char *trace_blackbox, struct qb_log_callsite *cs) { if (qb_log_ctl(source, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { return; } else if (cs->tags != crm_trace_nonlog && source == QB_LOG_BLACKBOX) { /* Blackbox gets everything if enabled */ qb_bit_set(cs->targets, source); } else if (source == blackbox_trigger && blackbox_trigger > 0) { /* Should this log message result in the blackbox being dumped */ if (cs->priority <= LOG_ERR) { qb_bit_set(cs->targets, source); } else if (trace_blackbox) { char *key = crm_strdup_printf("%s:%d", cs->function, cs->lineno); if (strstr(trace_blackbox, key) != NULL) { qb_bit_set(cs->targets, source); } free(key); } } else if (source == QB_LOG_SYSLOG) { /* No tracing to syslog */ if (cs->priority <= crm_log_priority && cs->priority <= crm_log_level) { qb_bit_set(cs->targets, 
source); } /* Log file tracing options... */ } else if (cs->priority <= crm_log_level) { qb_bit_set(cs->targets, source); } else if (trace_files && strstr(trace_files, cs->filename) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_fns && strstr(trace_fns, cs->function) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_fmts && strstr(trace_fmts, cs->format) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_tags && cs->tags != 0 && cs->tags != crm_trace_nonlog && g_quark_to_string(cs->tags) != NULL) { qb_bit_set(cs->targets, source); } } static void crm_log_filter(struct qb_log_callsite *cs) { int lpc = 0; static int need_init = 1; static const char *trace_fns = NULL; static const char *trace_tags = NULL; static const char *trace_fmts = NULL; static const char *trace_files = NULL; static const char *trace_blackbox = NULL; if (need_init) { need_init = 0; trace_fns = getenv("PCMK_trace_functions"); trace_fmts = getenv("PCMK_trace_formats"); trace_tags = getenv("PCMK_trace_tags"); trace_files = getenv("PCMK_trace_files"); trace_blackbox = getenv("PCMK_trace_blackbox"); if (trace_tags != NULL) { uint32_t tag; char token[500]; const char *offset = NULL; const char *next = trace_tags; do { offset = next; next = strchrnul(offset, ','); snprintf(token, sizeof(token), "%.*s", (int)(next - offset), offset); tag = g_quark_from_string(token); crm_info("Created GQuark %u from token '%s' in '%s'", tag, token, trace_tags); if (next[0] != 0) { next++; } } while (next != NULL && next[0] != 0); } } cs->targets = 0; /* Reset then find targets to enable */ for (lpc = QB_LOG_SYSLOG; lpc < QB_LOG_TARGET_MAX; lpc++) { crm_log_filter_source(lpc, trace_files, trace_fns, trace_fmts, trace_tags, trace_blackbox, cs); } } gboolean crm_is_callsite_active(struct qb_log_callsite *cs, uint8_t level, uint32_t tags) { gboolean refilter = FALSE; if (cs == NULL) { return FALSE; } if (cs->priority != level) { cs->priority = level; refilter = TRUE; } if (cs->tags != tags) { cs->tags = tags; refilter = TRUE; } if (refilter) { crm_log_filter(cs); } if (cs->targets == 0) { return FALSE; } return TRUE; } void crm_update_callsites(void) { static gboolean log = TRUE; if (log) { log = FALSE; crm_debug ("Enabling callsites based on priority=%d, files=%s, functions=%s, formats=%s, tags=%s", crm_log_level, getenv("PCMK_trace_files"), getenv("PCMK_trace_functions"), getenv("PCMK_trace_formats"), getenv("PCMK_trace_tags")); } qb_log_filter_fn_set(crm_log_filter); } static gboolean crm_tracing_enabled(void) { if (crm_log_level == LOG_TRACE) { return TRUE; } else if (getenv("PCMK_trace_files") || getenv("PCMK_trace_functions") || getenv("PCMK_trace_formats") || getenv("PCMK_trace_tags")) { return TRUE; } return FALSE; } static int crm_priority2int(const char *name) { struct syslog_names { const char *name; int priority; }; static struct syslog_names p_names[] = { {"emerg", LOG_EMERG}, {"alert", LOG_ALERT}, {"crit", LOG_CRIT}, {"error", LOG_ERR}, {"warning", LOG_WARNING}, {"notice", LOG_NOTICE}, {"info", LOG_INFO}, {"debug", LOG_DEBUG}, {NULL, -1} }; int lpc; for (lpc = 0; name != NULL && p_names[lpc].name != NULL; lpc++) { if (pcmk__str_eq(p_names[lpc].name, name, pcmk__str_none)) { return p_names[lpc].priority; } } return crm_log_priority; } /*! 
* \internal * \brief Set the identifier for the current process * * If the identifier crm_system_name is not already set, then it is set as follows: * - it is passed to the function via the "entity" parameter, or * - it is derived from the executable name * * The identifier can be used in logs, IPC, and more. * * This method also sets the PCMK_service environment variable. * * \param[in] entity If not NULL, will be assigned to the identifier * \param[in] argc The number of command line parameters * \param[in] argv The command line parameter values */ static void set_identity(const char *entity, int argc, char *const *argv) { if (crm_system_name != NULL) { return; // Already set, don't overwrite } if (entity != NULL) { crm_system_name = strdup(entity); } else if ((argc > 0) && (argv != NULL)) { char *mutable = strdup(argv[0]); char *modified = basename(mutable); if (strstr(modified, "lt-") == modified) { modified += 3; } crm_system_name = strdup(modified); free(mutable); } else { crm_system_name = strdup("Unknown"); } CRM_ASSERT(crm_system_name != NULL); setenv("PCMK_service", crm_system_name, 1); } void crm_log_preinit(const char *entity, int argc, char *const *argv) { /* Configure libqb logging with nothing turned on */ struct utsname res; int lpc = 0; int32_t qb_facility = 0; pid_t pid = getpid(); const char *nodename = "localhost"; static bool have_logging = false; if (have_logging) { return; } have_logging = true; crm_xml_init(); /* Sets buffer allocation strategy */ if (crm_trace_nonlog == 0) { crm_trace_nonlog = g_quark_from_static_string("Pacemaker non-logging tracepoint"); } umask(S_IWGRP | S_IWOTH | S_IROTH); /* Redirect messages from glib functions to our handler */ glib_log_default = g_log_set_default_handler(crm_glib_handler, NULL); /* and for good measure... - this enum is a bit field (!) */ g_log_set_always_fatal((GLogLevelFlags) 0); /*value out of range */ /* Set crm_system_name, which is used as the logging name. It may also * be used for other purposes such as an IPC client name. */ set_identity(entity, argc, argv); qb_facility = qb_log_facility2int("local0"); qb_log_init(crm_system_name, qb_facility, LOG_ERR); crm_log_level = LOG_CRIT; /* Nuke any syslog activity until it's asked for */ qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_FALSE); #ifdef HAVE_qb_log_conf_QB_LOG_CONF_MAX_LINE_LEN // Shorter than default, generous for what we *should* send to syslog qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_MAX_LINE_LEN, 256); #endif if (uname(memset(&res, 0, sizeof(res))) == 0 && *res.nodename != '\0') { nodename = res.nodename; } /* Set format strings and disable threading * Pacemaker and threads do not mix well (due to the amount of forking) */ qb_log_tags_stringify_fn_set(crm_quark_to_string); for (lpc = QB_LOG_SYSLOG; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_THREADED, QB_FALSE); #ifdef HAVE_qb_log_conf_QB_LOG_CONF_ELLIPSIS // End truncated lines with '...' qb_log_ctl(lpc, QB_LOG_CONF_ELLIPSIS, QB_TRUE); #endif set_format_string(lpc, crm_system_name, pid, nodename); } #ifdef ENABLE_NLS /* Enable translations (experimental). Currently we only have a few * proof-of-concept translations for some option help. The goal would be to * offer translations for option help and man pages rather than logs or * documentation, to reduce the burden of maintaining them. 
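 *
 * Translatable strings are marked with N_() where they are defined (as in
 * the fencer's option table) and are expected to be resolved through
 * gettext at display time.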
*/ // Load locale information for the local host from the environment setlocale(LC_ALL, ""); // Tell gettext where to find Pacemaker message catalogs CRM_ASSERT(bindtextdomain(PACKAGE, PCMK__LOCALE_DIR) != NULL); // Tell gettext to use the Pacemaker message catalogs CRM_ASSERT(textdomain(PACKAGE) != NULL); // Tell gettext that the translated strings are stored in UTF-8 bind_textdomain_codeset(PACKAGE, "UTF-8"); #endif } gboolean crm_log_init(const char *entity, uint8_t level, gboolean daemon, gboolean to_stderr, int argc, char **argv, gboolean quiet) { const char *syslog_priority = NULL; const char *facility = pcmk__env_option(PCMK__ENV_LOGFACILITY); const char *f_copy = facility; pcmk__is_daemon = daemon; crm_log_preinit(entity, argc, argv); if (level > LOG_TRACE) { level = LOG_TRACE; } if(level > crm_log_level) { crm_log_level = level; } /* Should we log to syslog */ if (facility == NULL) { if (pcmk__is_daemon) { facility = "daemon"; } else { facility = PCMK__VALUE_NONE; } pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility); } if (pcmk__str_eq(facility, PCMK__VALUE_NONE, pcmk__str_casei)) { quiet = TRUE; } else { qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_FACILITY, qb_log_facility2int(facility)); } if (pcmk__env_option_enabled(crm_system_name, PCMK__ENV_DEBUG)) { /* Override the default setting */ crm_log_level = LOG_DEBUG; } /* What lower threshold do we have for sending to syslog */ syslog_priority = pcmk__env_option(PCMK__ENV_LOGPRIORITY); if (syslog_priority) { crm_log_priority = crm_priority2int(syslog_priority); } qb_log_filter_ctl(QB_LOG_SYSLOG, QB_LOG_FILTER_ADD, QB_LOG_FILTER_FILE, "*", crm_log_priority); // Log to syslog unless requested to be quiet if (!quiet) { qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_TRUE); } /* Should we log to stderr */ if (pcmk__env_option_enabled(crm_system_name, PCMK__ENV_STDERR)) { /* Override the default setting */ to_stderr = TRUE; } crm_enable_stderr(to_stderr); // Log to a file if we're a daemon or user asked for one { const char *logfile = pcmk__env_option(PCMK__ENV_LOGFILE); if (!pcmk__str_eq(PCMK__VALUE_NONE, logfile, pcmk__str_casei) && (pcmk__is_daemon || (logfile != NULL))) { // Daemons always get a log file, unless explicitly set to "none" pcmk__add_logfile(logfile); } } if (pcmk__is_daemon && pcmk__env_option_enabled(crm_system_name, PCMK__ENV_BLACKBOX)) { crm_enable_blackbox(0); } /* Summary */ crm_trace("Quiet: %d, facility %s", quiet, f_copy); pcmk__env_option(PCMK__ENV_LOGFILE); pcmk__env_option(PCMK__ENV_LOGFACILITY); crm_update_callsites(); /* Ok, now we can start logging... 
*/ // Disable daemon request if user isn't root or Pacemaker daemon user if (pcmk__is_daemon) { const char *user = getenv("USER"); if (user != NULL && !pcmk__strcase_any_of(user, "root", CRM_DAEMON_USER, NULL)) { crm_trace("Not switching to corefile directory for %s", user); pcmk__is_daemon = false; } } if (pcmk__is_daemon) { int user = getuid(); struct passwd *pwent = getpwuid(user); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get name for uid: %d", user); } else if (!pcmk__strcase_any_of(pwent->pw_name, "root", CRM_DAEMON_USER, NULL)) { crm_trace("Don't change active directory for regular user: %s", pwent->pw_name); } else if (chdir(CRM_CORE_DIR) < 0) { crm_perror(LOG_INFO, "Cannot change active directory to " CRM_CORE_DIR); } else { crm_info("Changed active directory to " CRM_CORE_DIR); } /* Original meanings from signal(7) * * Signal Value Action Comment * SIGTRAP 5 Core Trace/breakpoint trap * SIGUSR1 30,10,16 Term User-defined signal 1 * SIGUSR2 31,12,17 Term User-defined signal 2 * * Our usage is as similar as possible */ mainloop_add_signal(SIGUSR1, crm_enable_blackbox); mainloop_add_signal(SIGUSR2, crm_disable_blackbox); mainloop_add_signal(SIGTRAP, crm_trigger_blackbox); } else if (!quiet) { crm_log_args(argc, argv); } return TRUE; } /* returns the old value */ unsigned int set_crm_log_level(unsigned int level) { unsigned int old = crm_log_level; if (level > LOG_TRACE) { level = LOG_TRACE; } crm_log_level = level; crm_update_callsites(); crm_trace("New log level: %d", level); return old; } void crm_enable_stderr(int enable) { if (enable && qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_TRUE); crm_update_callsites(); } else if (enable == FALSE) { qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_FALSE); } } /*! * \brief Make logging more verbose * * If logging to stderr is not already enabled when this function is called, * enable it. Otherwise, increase the log level by 1. * * \param[in] argc Ignored * \param[in] argv Ignored */ void crm_bump_log_level(int argc, char **argv) { if (qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { crm_enable_stderr(TRUE); } else { set_crm_log_level(crm_log_level + 1); } } unsigned int get_crm_log_level(void) { return crm_log_level; } /*! * \brief Log the command line (once) * * \param[in] argc Number of values in \p argv * \param[in] argv Command-line arguments (including command name) * * \note This function will only log once, even if called with different * arguments.
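 *
 * \note A typical call (a sketch; crm_log_init() already does this for
 *       non-daemon, non-quiet processes):
 * \code
 * crm_log_args(argc, argv);  // logs "Invoked: <full command line>" once
 * \endcode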
*/ void crm_log_args(int argc, char **argv) { static bool logged = false; gchar *arg_string = NULL; if ((argc == 0) || (argv == NULL) || logged) { return; } logged = true; arg_string = g_strjoinv(" ", argv); crm_notice("Invoked: %s", arg_string); g_free(arg_string); } void crm_log_output_fn(const char *file, const char *function, int line, int level, const char *prefix, const char *output) { const char *next = NULL; const char *offset = NULL; if (level == LOG_NEVER) { return; } if (output == NULL) { if (level != LOG_STDOUT) { level = LOG_TRACE; } output = "-- empty --"; } next = output; do { offset = next; next = strchrnul(offset, '\n'); do_crm_log_alias(level, file, function, line, "%s [ %.*s ]", prefix, (int)(next - offset), offset); if (next[0] != 0) { next++; } } while (next != NULL && next[0] != 0); } void pcmk__cli_init_logging(const char *name, unsigned int verbosity) { crm_log_init(name, LOG_ERR, FALSE, FALSE, 0, NULL, TRUE); for (int i = 0; i < verbosity; i++) { /* These arguments are ignored, so pass placeholders. */ crm_bump_log_level(0, NULL); } } /*! * \brief Log XML line-by-line in a formatted fashion * * \param[in] level Priority at which to log the messages * \param[in] text Prefix for each line * \param[in] xml XML to log * * \note This does nothing when \p level is \p LOG_STDOUT. * \note Do not call this function directly. It should be called only from the * \p do_crm_log_xml() macro. */ void pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml) { if (xml == NULL) { do_crm_log(level, "%s%sNo data to dump as XML", pcmk__s(text, ""), pcmk__str_empty(text)? "" : " "); } else { if (logger_out == NULL) { CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return); } pcmk__output_set_log_level(logger_out, level); pcmk__xml_show(logger_out, text, xml, 1, pcmk__xml_fmt_pretty |pcmk__xml_fmt_open |pcmk__xml_fmt_children |pcmk__xml_fmt_close); } } /*! * \internal * \brief Free the logging library's internal log output object */ void pcmk__free_common_logger(void) { if (logger_out != NULL) { logger_out->finish(logger_out, CRM_EX_OK, true, NULL); pcmk__output_free(logger_out); logger_out = NULL; } } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include gboolean crm_log_cli_init(const char *entity) { pcmk__cli_init_logging(entity, 0); return TRUE; } gboolean crm_add_logfile(const char *filename) { return pcmk__add_logfile(filename) == pcmk_rc_ok; } // LCOV_EXCL_STOP // End deprecated API diff --git a/python/pacemaker/_cts/Makefile.am b/python/pacemaker/_cts/Makefile.am index eb3a08dfe4..bb2fe50d48 100644 --- a/python/pacemaker/_cts/Makefile.am +++ b/python/pacemaker/_cts/Makefile.am @@ -1,17 +1,18 @@ # # Copyright 2023 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. 
diff --git a/python/pacemaker/_cts/Makefile.am b/python/pacemaker/_cts/Makefile.am
index eb3a08dfe4..bb2fe50d48 100644
--- a/python/pacemaker/_cts/Makefile.am
+++ b/python/pacemaker/_cts/Makefile.am
@@ -1,17 +1,18 @@
#
# Copyright 2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#

MAINTAINERCLEANFILES = Makefile.in

pkgpythondir = $(pythondir)/$(PACKAGE)/_cts

pkgpython_PYTHON = __init__.py \
+                  corosync.py \
                   errors.py \
                   process.py \
                   test.py

diff --git a/python/pacemaker/_cts/corosync.py b/python/pacemaker/_cts/corosync.py
new file mode 100644
index 0000000000..9ffa3ebd19
--- /dev/null
+++ b/python/pacemaker/_cts/corosync.py
@@ -0,0 +1,169 @@
+""" A module providing functions for manipulating corosync """
+
+__all__ = ["Corosync", "localname"]
+__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
+__license__ = "LGPLv2.1+"
+
+import os
+import subprocess
+import time
+
+from pacemaker.buildoptions import BuildOptions
+from pacemaker._cts.process import killall, stdout_from_command
+
+
+AUTOGEN_COROSYNC_TEMPLATE = """
+totem {
+    version: 2
+    cluster_name: %s
+    crypto_cipher: none
+    crypto_hash: none
+    transport: udp
+}
+
+nodelist {
+    node {
+        nodeid: 1
+        name: %s
+        ring0_addr: 127.0.0.1
+    }
+}
+
+logging {
+    debug: off
+    to_syslog: no
+    to_stderr: no
+    to_logfile: yes
+    logfile: %s
+}
+"""
+
+
+def corosync_cfg_exists():
+    """ Does the corosync config file exist? """
+
+    return os.path.exists(BuildOptions.COROSYNC_CONFIG_FILE)
+
+
+def corosync_log_file(cfgfile):
+    """ Where does corosync log to? """
+
+    with open(cfgfile, "r", encoding="utf-8") as f:
+        for line in f.readlines():
+            # "to_logfile:" could also be in the config file, so check for a
+            # slash to make sure it's a path we're looking at.
+            if "logfile: /" in line:
+                return line.split()[-1]
+
+    return None
+
+
+def generate_corosync_cfg(logdir, cluster_name, node_name):
+    """ Generate the corosync config file, if it does not already exist """
+
+    if corosync_cfg_exists():
+        return False
+
+    logfile = os.path.join(logdir, "corosync.log")
+
+    with open(BuildOptions.COROSYNC_CONFIG_FILE, "w", encoding="utf-8") as corosync_cfg:
+        corosync_cfg.write(AUTOGEN_COROSYNC_TEMPLATE % (cluster_name, node_name, logfile))
+
+    return True
+
+
+def localname():
+    """ Return the uname of the local host """
+
+    our_uname = stdout_from_command(["uname", "-n"])
+    if our_uname:
+        our_uname = our_uname[0]
+    else:
+        our_uname = "localhost"
+
+    return our_uname
+
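+# For example, a single-node test setup could be prepared like this (sketch;
+# the log directory and cluster name are arbitrary illustrative values):
+#
+#   if generate_corosync_cfg("/tmp/ctslogs", "ctscluster", localname()):
+#       print(corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE))
+#       # -> /tmp/ctslogs/corosync.log
+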
+
+class Corosync:
+    """ A class for managing corosync processes and config files """
+
+    def __init__(self, verbose, logdir, cluster_name):
+        """ Create a new Corosync instance.
+
+            Arguments:
+
+            verbose      -- Whether to print the corosync log file
+            logdir       -- The base directory under which to store log files
+            cluster_name -- The name of the cluster
+        """
+
+        self.verbose = verbose
+        self.logdir = logdir
+        self.cluster_name = cluster_name
+
+        self._generated_cfg_file = False
+
+    def _ready(self, logfile, timeout=10):
+        """ Is corosync ready to go? """
+
+        i = 0
+
+        while i < timeout:
+            with open(logfile, "r", encoding="utf-8") as corosync_log:
+                for line in corosync_log.readlines():
+                    if line.endswith("ready to provide service.\n"):
+                        # Even once the line is in the log file, we may still
+                        # need to wait just a little bit longer before corosync
+                        # is really ready to go.
+                        time.sleep(1)
+                        return
+
+            time.sleep(1)
+            i += 1
+
+        raise TimeoutError
+
+    def start(self, kill_first=False, timeout=10):
+        """ Start the corosync process
+
+            Arguments:
+
+            kill_first -- Whether to kill any pre-existing corosync processes
+                          before starting a new one
+            timeout    -- If corosync does not start within this many seconds,
+                          raise TimeoutError
+        """
+
+        if kill_first:
+            killall(["corosync"])
+
+        self._generated_cfg_file = generate_corosync_cfg(self.logdir,
+                                                         self.cluster_name,
+                                                         localname())
+        logfile = corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE)
+
+        if self.verbose:
+            print("Starting corosync")
+
+        with subprocess.Popen("corosync", stdout=subprocess.PIPE) as test:
+            test.wait()
+
+        # Wait for corosync to be ready before returning
+        self._ready(logfile, timeout=timeout)
+
+    def stop(self):
+        """ Stop the corosync process """
+
+        killall(["corosync"])
+
+        # If we did not write out the corosync config file, don't do anything else.
+        if not self._generated_cfg_file:
+            return
+
+        if self.verbose:
+            print("Corosync output")
+
+        logfile = corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE)
+        with open(logfile, "r", encoding="utf-8") as corosync_log:
+            for line in corosync_log.readlines():
+                print(line.strip())
+
+        os.remove(BuildOptions.COROSYNC_CONFIG_FILE)
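A usage sketch for the class above (the log directory and cluster name are
illustrative):

    from pacemaker._cts.corosync import Corosync

    corosync = Corosync(verbose=True, logdir="/tmp", cluster_name="ctscluster")
    corosync.start(kill_first=True, timeout=10)  # raises TimeoutError on failure
    try:
        pass  # exercise the single-node cluster here
    finally:
        corosync.stop()  # dumps the log (if verbose) and removes a generated config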
diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py
index 0648c2b912..eebb3db533 100644
--- a/python/pacemaker/_cts/test.py
+++ b/python/pacemaker/_cts/test.py
@@ -1,486 +1,585 @@
""" A module providing base classes for defining regression tests and groups of
    regression tests.  Everything exported here should be considered an abstract
    class that needs to be subclassed in order to do anything useful.  Various
    functions will raise NotImplementedError if not overridden by a subclass.
"""

__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
__license__ = "LGPLv2.1+"

__all__ = ["Test", "Tests"]

import io
import os
import re
import shlex
+import signal
import subprocess
import sys
import time

from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError, XmlValidationError
from pacemaker._cts.process import pipe_communicate
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus


def find_validator(rng_file):
    """ Return the command line used to validate XML output, or None if the
        validator is not installed.
    """

    if os.access("/usr/bin/xmllint", os.X_OK):
        if rng_file is None:
            return ["xmllint", "-"]

        return ["xmllint", "--relaxng", rng_file, "-"]

    return None


def rng_directory():
    """ Which directory contains the RNG schema files? """

    if "PCMK_schema_directory" in os.environ:
        return os.environ["PCMK_schema_directory"]

    if os.path.exists("%s/cts-fencing.in" % sys.path[0]):
        return "xml"

    return BuildOptions.SCHEMA_DIR


+class Pattern:
+    """ A class for checking log files for a given pattern """
+
+    def __init__(self, pat, negative=False, regex=False):
+        """ Create a new Pattern instance
+
+            Arguments:
+
+            pat      -- The string to search for
+            negative -- If True, pat must not be found in any input
+            regex    -- If True, pat is a regex and not a substring
+        """
+
+        self._pat = pat
+        self.negative = negative
+        self.regex = regex
+
+    def __str__(self):
+        return self._pat
+
+    def match(self, line):
+        """ Is this pattern found in the given line? """
+
+        if self.regex:
+            return re.search(self._pat, line) is not None
+
+        return self._pat in line
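+
+# A rough sketch of how Pattern behaves (example strings are illustrative):
+#
+#   Pattern("error:").match("error: timed out")             -> True
+#   Pattern("error:", negative=True).match("all clear")     -> False, the
+#                                                              desired outcome
+#                                                              for a negative
+#                                                              pattern
+#   Pattern(r"fence.*OK$", regex=True).match("fenced: OK")  -> True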


class Test:
    """ The base class for a single regression test.  A single regression test
        may still run multiple commands as part of its execution.
    """

    def __init__(self, name, description, **kwargs):
        """ Create a new Test instance.

            This method must be provided by all subclasses, which must call
            Test.__init__ first.

            Arguments:

            description -- A user-readable description of the test, helpful in
                           identifying what test is running or has failed.
            name        -- The name of the test.  Command line tools use this
                           attribute to allow running only tests with the exact
                           name, or tests whose name matches a given pattern.
                           This should be unique among all tests.

            Keyword arguments:

            force_wait  -- Wait the entire timeout period before running the
                           test, instead of returning as soon as the daemon log
                           reports a successful start.
            logdir      -- The base directory under which to create a directory
                           to store output and temporary data.
            timeout     -- How long to wait for the test to complete.
            verbose     -- Whether to print additional information, including
                           verbose command output and daemon log files.
        """

        self.description = description
        self.executed = False
        self.name = name

        self.force_wait = kwargs.get("force_wait", False)
        self.logdir = kwargs.get("logdir", "/tmp")
        self.timeout = kwargs.get("timeout", 2)
        self.verbose = kwargs.get("verbose", False)

        self._cmds = []
+        self._patterns = []
+
+        self._daemon_location = None
+        self._daemon_output = ""
+        self._daemon_process = None
+
        self._result_exitcode = ExitStatus.OK
        self._result_txt = ""

-        self._stonith_process = None

    ###
    ### PROPERTIES
    ###

    @property
    def exitcode(self):
        """ The final exitcode of the Test.  If all commands pass, this property
            will be ExitStatus.OK.  Otherwise, this property will be the exitcode
            of the first command to fail.
        """
        return self._result_exitcode

    @exitcode.setter
    def exitcode(self, value):
        self._result_exitcode = value

    @property
    def logpath(self):
        """ The path to the log for whatever daemon is being tested.  Note that
            this requires all subclasses to set self._daemon_location before
            accessing this property or an exception will be raised.
        """
        return os.path.join(self.logdir, self._daemon_location + ".log")

    ###
    ### PRIVATE METHODS
    ###

    def _kill_daemons(self):
        """ Kill any running daemons in preparation for executing the test """
        raise NotImplementedError("_kill_daemons not provided by subclass")

-    def _match_patterns(self):
+    def _match_log_patterns(self):
        """ Check test output for expected patterns, setting self.exitcode and
            self._result_txt as appropriate.  Not all subclasses will need to
            do this.
        """
-        # pylint: disable=no-self-use
-        return
+        if len(self._patterns) == 0:
+            return
+
+        n_failed_matches = 0
+        n_negative_matches = 0
+
+        output = self._daemon_output.split("\n")
+
+        for pat in self._patterns:
+            positive_match = False
+
+            for line in output:
+                if pat.match(line):
+                    if pat.negative:
+                        n_negative_matches += 1
+
+                        if self.verbose:
+                            print("This pattern should not have matched = '%s'" % pat)
+
+                        break
+
+                    positive_match = True
+                    break
+
+            if not pat.negative and not positive_match:
+                n_failed_matches += 1
+                print("Pattern Not Matched = '%s'" % pat)
+
+        if n_failed_matches > 0 or n_negative_matches > 0:
+            msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches."
+            self._result_txt = msg % (self.name, n_failed_matches, len(self._patterns), n_negative_matches)
+            self.exitcode = ExitStatus.ERROR
+
    def _new_cmd(self, cmd, args, exitcode, **kwargs):
        """ Add a command to be executed as part of this test.

            Arguments:

            cmd      -- The program to run.
            args     -- Command-line arguments to pass to cmd, as a string.
            exitcode -- The expected exit code of cmd.  This can be used to run
                        a command that is expected to fail.

            Keyword arguments:

            stdout_match          -- If not None, a string that is expected to
                                     be present in the stdout of cmd.  This can
                                     be a regular expression.
            no_wait               -- Do not wait for cmd to complete.
            stdout_negative_match -- If not None, a string that is expected to
                                     be missing in the stdout of cmd.  This can
                                     be a regular expression.
            kill                  -- A command to be run after cmd, typically in
                                     order to kill a failed process.  This should
                                     be the entire command line including
                                     arguments as a single string.
            validate              -- If True, the output of cmd will be passed
                                     to xmllint for validation.  If validation
                                     fails, XmlValidationError will be raised.
            check_rng             -- If True and validate is True, command
                                     output will additionally be checked against
                                     the api-result.rng file.
            check_stderr          -- If True, the stderr of cmd will be included
                                     in output.
        """

        self._cmds.append(
            {
                "args": args,
                "check_rng": kwargs.get("check_rng", True),
                "check_stderr": kwargs.get("check_stderr", True),
                "cmd": cmd,
                "expected_exitcode": exitcode,
                "kill": kwargs.get("kill", None),
                "no_wait": kwargs.get("no_wait", False),
                "stdout_match": kwargs.get("stdout_match", None),
                "stdout_negative_match": kwargs.get("stdout_negative_match", None),
                "validate": kwargs.get("validate", True),
            }
        )

    def _start_daemons(self):
        """ Start any necessary daemons in preparation for executing the test """
        raise NotImplementedError("_start_daemons not provided by subclass")
    ###
    ### PUBLIC METHODS
    ###

    def add_cmd(self, cmd, args, validate=True, check_rng=True, check_stderr=True):
        """ Add a simple command to be executed as part of this test """

        self._new_cmd(cmd, args, ExitStatus.OK, validate=validate,
                      check_rng=check_rng, check_stderr=check_stderr)

    def add_cmd_and_kill(self, cmd, args, kill_proc):
        """ Add a command and system command to be executed as part of this test """

        self._new_cmd(cmd, args, ExitStatus.OK, kill=kill_proc)

    def add_cmd_check_stdout(self, cmd, args, match, no_match=None):
        """ Add a simple command with expected output to be executed as part of this test """

        self._new_cmd(cmd, args, ExitStatus.OK, stdout_match=match,
                      stdout_negative_match=no_match)

    def add_cmd_expected_fail(self, cmd, args, exitcode=ExitStatus.ERROR):
        """ Add a command that is expected to fail to be executed as part of this test """

        self._new_cmd(cmd, args, exitcode)

    def add_cmd_no_wait(self, cmd, args):
        """ Add a simple command to be executed (without waiting) as part of this test """

        self._new_cmd(cmd, args, ExitStatus.OK, no_wait=True)

+    def add_log_pattern(self, pattern, negative=False, regex=False):
+        """ Add a pattern that should appear in the test's logs """
+
+        self._patterns.append(Pattern(pattern, negative=negative, regex=regex))
+
    def clean_environment(self):
        """ Clean up the host after executing a test """
-        raise NotImplementedError("clean_environment not provided by subclass")
+
+        if self._daemon_process:
+            if self._daemon_process.poll() is None:
+                self._daemon_process.terminate()
+                self._daemon_process.wait()
+            else:
+                return_code = {
+                    getattr(signal, _signame): _signame
+                    for _signame in dir(signal)
+                    if _signame.startswith('SIG') and not _signame.startswith("SIG_")
+                }.get(-self._daemon_process.returncode, "RET=%d" % (self._daemon_process.returncode))
+                msg = "FAILURE - '%s' failed. %s abnormally exited during test (%s)."
+                self._result_txt = msg % (self.name, self._daemon_location, return_code)
+                self.exitcode = ExitStatus.ERROR
+
+        self._daemon_process = None
+        self._daemon_output = ""
+
+        # Read the log with ISO-8859-1 rather than the default UTF-8 encoding:
+        # if memory corruption makes a daemon such as pacemaker-fenced emit
+        # arbitrary 8-bit bytes, decoding must not fail, since that output is
+        # still interesting for debugging and the regression test should
+        # continue through the full set of test cases.
+        with open(self.logpath, 'rt', encoding="ISO-8859-1") as logfile:
+            for line in logfile.readlines():
+                self._daemon_output += line
+
+        if self.verbose:
+            print("Daemon Output Start")
+            print(self._daemon_output)
+            print("Daemon Output End")

    def print_result(self, filler):
        """ Print the result of the last test execution """

        print("%s%s" % (filler, self._result_txt))

    def run(self):
        """ Execute this test """

        i = 1

        self.start_environment()

        if self.verbose:
            print("\n--- START TEST - %s" % self.name)

        self._result_txt = "SUCCESS - '%s'" % (self.name)
        self.exitcode = ExitStatus.OK

        for cmd in self._cmds:
            try:
                self.run_cmd(cmd)
            except ExitCodeError as e:
                print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode']))
                self.set_error(i, cmd)
                break
            except OutputNotFoundError as e:
                print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e))
                self.set_error(i, cmd)
                break
            except OutputFoundError as e:
                print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e))
                self.set_error(i, cmd)
                break
            except XmlValidationError as e:
                print("Step %d FAILED - xmllint failed: %s" % (i, e))
                self.set_error(i, cmd)
                break

            if self.verbose:
                print("Step %d SUCCESS" % (i))

            i = i + 1

        self.clean_environment()

        if self.exitcode == ExitStatus.OK:
-            self._match_patterns()
+            self._match_log_patterns()

        print(self._result_txt)

        if self.verbose:
            print("--- END TEST - %s\n" % self.name)

        self.executed = True
    def run_cmd(self, args):
        """ Execute a command as part of this test """

        cmd = shlex.split(args['args'])
        cmd.insert(0, args['cmd'])

        if self.verbose:
            print("\n\nRunning: %s" % " ".join(cmd))

        # FIXME: Using "with" here breaks fencing merge tests.
        # pylint: disable=consider-using-with
        test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        if args['kill']:
            if self.verbose:
                print("Also running: %s" % args['kill'])

            ### Typically, the kill argument is used to detect some sort of
            ### failure. Without yielding for a few seconds here, the process
            ### launched earlier that is listening for the failure may not have
            ### time to connect to pacemaker-execd.
            time.sleep(2)
            subprocess.Popen(shlex.split(args['kill']))

        if not args['no_wait']:
            test.wait()
        else:
            return ExitStatus.OK

        output = pipe_communicate(test, check_stderr=args['check_stderr'])

        if self.verbose:
            print(output)

        if test.returncode != args['expected_exitcode']:
            raise ExitCodeError(test.returncode)

        if args['stdout_match'] is not None and \
           re.search(args['stdout_match'], output) is None:
            raise OutputNotFoundError(output)

        if args['stdout_negative_match'] is not None and \
           re.search(args['stdout_negative_match'], output) is not None:
            raise OutputFoundError(output)

        if args['validate']:
            if args['check_rng']:
                rng_file = rng_directory() + "/api/api-result.rng"
            else:
                rng_file = None

            cmd = find_validator(rng_file)
            if not cmd:
                raise XmlValidationError("Could not find validator for %s" % rng_file)

            if self.verbose:
                print("\nRunning: %s" % " ".join(cmd))

            with subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as validator:
                output = pipe_communicate(validator, check_stderr=True, stdin=output)

            if self.verbose:
                print(output)

            if validator.returncode != 0:
                raise XmlValidationError(output)

        return ExitStatus.OK

    def set_error(self, step, cmd):
        """ Record failure of this test """

        msg = "FAILURE - '%s' failed at step %d. Command: %s %s"
        self._result_txt = msg % (self.name, step, cmd['cmd'], cmd['args'])
        self.exitcode = ExitStatus.ERROR

    def start_environment(self):
        """ Prepare the host for executing a test """

        if os.path.exists(self.logpath):
            os.remove(self.logpath)

        self._kill_daemons()
        self._start_daemons()

        logfile = None

        init_time = time.time()
        update_time = init_time

        while True:
            # FIXME: Eventually use 'with' here, which seems complicated given
            # everything happens in a loop.
            # pylint: disable=consider-using-with
            time.sleep(0.1)

            if not self.force_wait and logfile is None \
               and os.path.exists(self.logpath):
                logfile = io.open(self.logpath, 'rt', encoding="ISO-8859-1")

            if not self.force_wait and logfile is not None:
                for line in logfile.readlines():
                    if "successfully started" in line:
                        return

            now = time.time()

            if self.timeout > 0 and (now - init_time) >= self.timeout:
                if not self.force_wait:
                    print("\tDaemon %s doesn't seem to have been initialized within %fs."
                          "\n\tConsider specifying a longer '--timeout' value."
                          % (self._daemon_location, self.timeout))
                return

            if self.verbose and (now - update_time) >= 5:
                print("Waiting for %s to be initialized: %fs ..."
                      % (self._daemon_location, now - init_time))
                update_time = now
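
# A minimal subclass sketch (hypothetical; real tests live in drivers such as
# cts-exec and cts-fencing, and killall comes from pacemaker._cts.process):
#
#   class ExecdTest(Test):
#       def __init__(self, name, description, **kwargs):
#           Test.__init__(self, name, description, **kwargs)
#           self._daemon_location = "pacemaker-execd"
#
#       def _kill_daemons(self):
#           killall(["pacemaker-execd"])
#
#       def _start_daemons(self):
#           self._daemon_process = subprocess.Popen(["pacemaker-execd"])
#
# Registering work for such a test then looks like:
#
#   test = ExecdTest("api_smoke", "Hypothetical smoke test", verbose=True)
#   test.add_cmd("cts-exec-helper", "<arguments for the helper>")
#   test.add_log_pattern("successfully started")
#   test.run()
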
""" self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._tests = [] def exit(self): """ Exit (with error status code if any test failed) """ for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: sys.exit(ExitStatus.ERROR) sys.exit(ExitStatus.OK) def print_list(self): """ List all registered tests """ print("\n==== %d TESTS FOUND ====" % len(self._tests)) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self._tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def print_results(self): """ Print summary of results of executed tests """ failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) def run_single(self, name): """ Run a single named test """ for test in self._tests: if test.name == name: test.run() break def run_tests(self): """ Run all tests """ for test in self._tests: test.run() def run_tests_matching(self, pattern): """ Run all tests whose name matches a pattern """ for test in self._tests: if test.name.count(pattern) != 0: test.run()