diff --git a/configure.ac b/configure.ac index 470f483b44..ee005f0cff 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2154 +1,2155 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2023 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08). dnl Once we can require that version, we can simplify this, and no longer dnl need ACLOCAL_AMFLAGS in Makefile.am. m4_ifdef([AC_CONFIG_MACRO_DIRS], [AC_CONFIG_MACRO_DIRS([m4])], [AC_CONFIG_MACRO_DIR([m4])]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([m4/version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" LT_CONFIG_LTDL_DIR([libltdl]) AC_CONFIG_AUX_DIR([libltdl/config]) AC_CANONICAL_HOST dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.13: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects]) dnl Require minimum version of pkg-config PKG_PROG_PKG_CONFIG(0.27) AS_IF([test x"${PKG_CONFIG}" != x""], [], [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.27 or later)])]) PKG_INSTALLDIR PKG_NOARCH_INSTALLDIR dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== dnl A particular compiler can be forced by setting the CC environment variable AC_PROG_CC dnl Use at least C99 if possible (automatic for autoconf >= 2.70) m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC]) dnl C++ is not needed for build, just maintainer utilities AC_PROG_CXX dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that dnl the C compiler is working. ... The core part of the gnulib checks are done dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL dnl as a dependency. 
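dnl As a sketch, the ordering contract from gnulib's documentation is:
dnl
dnl     AC_PROG_CC                        # C compiler must be known to work
dnl     gl_EARLY                          # as soon as possible afterwards
dnl     gl_SET_CRYPTO_CHECK_DEFAULT([no]) # before gl_INIT, to keep OpenSSL out
dnl     gl_INIT                           # core gnulib checks
dnl
dnl which is exactly the sequence that follows.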
gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT # --enable-new-dtags: Use RUNPATH instead of RPATH. # It is necessary to have this done before libtool does linker detection. # See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])]) AC_SUBST([AM_LDFLAGS]) saved_LDFLAGS="$LDFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING([whether $CC supports $@]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } # expand_path_option $path_variable_name $default expand_path_option() { # The first argument is the variable *name* (not value) ac_path_varname="$1" # Get the original value of the variable ac_path_value=$(eval echo "\${${ac_path_varname}}") # Expand any literal variable expressions in the value so that we don't # end up with something like '${prefix}' in #defines etc. # # Autoconf deliberately leaves values unexpanded to allow overriding # the configure script choices in make commands (for example, # "make exec_prefix=/foo install"). No longer being able to do this seems # like no great loss. eval ac_path_value=$(eval echo "${ac_path_value}") # Use (expanded) default if necessary AS_IF([test x"${ac_path_value}" = x""], [eval ac_path_value=$(eval echo "$2")]) # Require a full path AS_CASE(["$ac_path_value"], [/*], [eval ${ac_path_varname}="$ac_path_value"], [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])] ) } # yes_no_try $user_response $default DISABLED=0 REQUIRED=1 OPTIONAL=2 yes_no_try() { local value AS_IF([test x"$1" = x""], [value="$2"], [value="$1"]) AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"], [0|no|false|disable], [return $DISABLED], [1|yes|true|enable], [return $REQUIRED], [try|check], [return $OPTIONAL] ) AC_MSG_ERROR([Invalid option value "$value"]) } check_systemdsystemunitdir() { AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) test x"$systemdsystemunitdir" != x"" return $? 
} # # Fix the defaults of certain built-in variables so they can be used in our # custom argument defaults # AC_MSG_NOTICE([Sanitizing prefix: ${prefix}]) AS_IF([test x"$prefix" = x"NONE"], [ prefix=/usr dnl Fix default variables - "prefix" variable if not specified AS_IF([test x"$localstatedir" = x"\${prefix}/var"], [localstatedir="/var"]) AS_IF([test x"$sysconfdir" = x"\${prefix}/etc"], [sysconfdir="/etc"]) ]) AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}]) AS_CASE([$exec_prefix], [prefix|NONE], [exec_prefix=$prefix]) AC_MSG_NOTICE([Sanitizing libdir: ${libdir}]) AS_CASE([$libdir], [prefix|NONE], [ AC_MSG_CHECKING([which lib directory to use]) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" AS_IF([test -d ${trydir}], [ libdir=${trydir} break ]) done AC_MSG_RESULT([$libdir]) ]) dnl =============================================== dnl Configure Options dnl =============================================== dnl Actual library checks come later, but pkg-config can be used here to grab dnl external values to use as defaults for configure options dnl Per the autoconf docs, --enable-*/--disable-* options should control dnl features inherent to Pacemaker, while --with-*/--without-* options should dnl control the use of external software. However, --enable-*/--disable-* may dnl implicitly require additional external dependencies, and dnl --with-*/--without-* may implicitly enable or disable features, so the dnl line is blurry. dnl dnl We also use --with-* options for custom file, directory, and path dnl locations, since autoconf does not provide an option type for those. dnl --enable-* options: build process AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])] ) yes_no_try "$enable_quiet" "no" enable_quiet=$? AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@try@:>@])], ) yes_no_try "$enable_fatal_warnings" "try" enable_fatal_warnings=$? AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])] ) yes_no_try "$enable_hardening" "try" enable_hardening=$? dnl --enable-* options: features AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])] ) yes_no_try "$enable_systemd" "try" enable_systemd=$? AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart (deprecated) @<:@try@:>@])] ) yes_no_try "$enable_upstart" "try" enable_upstart=$? dnl --enable-* options: features inherent to Pacemaker AC_ARG_ENABLE([compat-2.0], [AS_HELP_STRING([--enable-compat-2.0], m4_normalize([ preserve certain output as it was in 2.0; this option will be available only for the lifetime of the 2.1 series @<:@no@:>@]))] ) yes_no_try "$enable_compat_2_0" "no" enable_compat_2_0=$? AS_IF([test $enable_compat_2_0 -ne $DISABLED], [ AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1], [Keep certain output compatible with 2.0 release series]) PCMK_FEATURES="$PCMK_FEATURES compat-2.0" ] ) # Add an option to create symlinks at the pre-2.0.0 daemon name locations, so # that users and tools can continue to invoke those names directly (e.g., for # meta-data). This option will be removed in a future release. AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names (deprecated) @<:@no@:>@])] ) yes_no_try "$enable_legacy_links" "no" enable_legacy_links=$? 
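# A hypothetical Makefile.am fragment showing how the BUILD_LEGACY_LINKS
# automake conditional (defined just below) would be consumed; the actual
# rules live in the daemon Makefiles:
#
#     if BUILD_LEGACY_LINKS
#     install-exec-hook:
#             cd $(DESTDIR)$(CRM_DAEMON_DIR) && $(LN_S) -f pacemaker-controld crmd
#     endif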
AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED]) # AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults # to enabled. We override the definition of AM_NLS to flip the default and mark # it as experimental in the help text. AC_DEFUN([AM_NLS], [AC_MSG_CHECKING([whether NLS is requested]) AC_ARG_ENABLE([nls], [AS_HELP_STRING([--enable-nls], [use Native Language Support (experimental)])], USE_NLS=$enableval, USE_NLS=no) AC_MSG_RESULT([$USE_NLS]) AC_SUBST([USE_NLS])] ) AM_GNU_GETTEXT([external]) AM_GNU_GETTEXT_VERSION([0.18]) AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"]) dnl --with-* options: external software support, and custom locations dnl This argument is defined via an M4 macro so default can be a variable AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACEMAKER_VERSION="$withval" ], [ PACEMAKER_VERSION="$PACKAGE_VERSION" ])] ) VERSION_ARG(VERSION_NUMBER) # Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case # the user used --with-version. Unfortunately, this can only affect the # substitution variables and later uses in this file, not the config.h # constants, so we have to be careful to use only PACEMAKER_VERSION in C code. PACKAGE_VERSION=$PACEMAKER_VERSION VERSION=$PACEMAKER_VERSION # Detect highest API schema version (use git if available to list managed RNGs, # in case there are leftover schema files from an earlier build of a different # version, otherwise check all RNGs) API_VERSION=$({ git ls-files xml/api/*.rng 2>/dev/null || ls -1 xml/api/*.rng ; } dnl | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' | sort -V | tail -1) AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"], [Highest API schema version]) # Re-run configure at next make if any RNG changes, to re-detect highest AC_SUBST([CONFIG_STATUS_DEPENDENCIES], [$(echo '$(wildcard $(top_srcdir)/xml/api/*.rng)')]) CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], m4_normalize([ address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))], [ BUG_URL="$withval" ] ) dnl --with-* options: features AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets @<:@no@:>@])] ) yes_no_try "$with_cibsecrets" "no" with_cibsecrets=$? AC_ARG_WITH([gnutls], [AS_HELP_STRING([--with-gnutls], [support Pacemaker Remote and remote-tls-port using GnuTLS @<:@try@:>@])] ) yes_no_try "$with_gnutls" "try" with_gnutls=$? 
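dnl Every tri-state option in this file funnels through yes_no_try; as a
dnl sketch, with a hypothetical --with-foo defaulting to "try":
dnl
dnl     AC_ARG_WITH([foo],
dnl         [AS_HELP_STRING([--with-foo], [support foo @<:@try@:>@])])
dnl     yes_no_try "$with_foo" "try"
dnl     with_foo=$?    # now $DISABLED (0), $REQUIRED (1), or $OPTIONAL (2)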
PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) AC_ARG_WITH([concurrent-fencing-default], [AS_HELP_STRING([--with-concurrent-fencing-default], [default value for concurrent-fencing cluster option @<:@false@:>@])], ) AS_CASE([$with_concurrent_fencing_default], [""], [with_concurrent_fencing_default="false"], [false], [], [true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"], [AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])] ) AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT], ["$with_concurrent_fencing_default"], [Default value for concurrent-fencing cluster option]) AC_ARG_WITH([sbd-sync-default], [AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([ default value used by sbd if SBD_SYNC_RESOURCE_STARTUP environment variable is not set @<:@false@:>@]))], ) AS_CASE([$with_sbd_sync_default], [""], [with_sbd_sync_default=false], [false], [], [true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"], [AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])] ) AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT], [$with_sbd_sync_default], [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable]) AC_ARG_WITH([resource-stickiness-default], [AS_HELP_STRING([--with-resource-stickiness-default], [If positive, value to add to new CIBs as explicit resource default for resource-stickiness @<:@0@:>@])], ) errmsg="Invalid value \"$with_resource_stickiness_default\" for --with-resource-stickiness-default" AS_CASE([$with_resource_stickiness_default], [0|""], [with_resource_stickiness_default="0"], [*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])], [PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"] ) AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT], [$with_resource_stickiness_default], [Default value for resource-stickiness resource meta-attribute]) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer @<:@try@:>@])] ) yes_no_try "$with_corosync" "try" with_corosync=$? dnl Get default from corosync if possible. PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir], [PCMK__COROSYNC_CONF="$PCMK__COROSYNC_CONF/corosync.conf"], [PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"]) AC_ARG_WITH([corosync-conf], [AS_HELP_STRING([--with-corosync-conf], m4_normalize([ location of Corosync configuration file @<:@value from Corosync package if available otherwise SYSCONFDIR/corosync/corosync.conf@:>@]))], [ PCMK__COROSYNC_CONF="$withval" ] ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios resources (deprecated)])] ) yes_no_try "$with_nagios" "try" with_nagios=$? 
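dnl Illustrative configure invocation for the defaults above (values are
dnl examples only):
dnl
dnl     ./configure --with-concurrent-fencing-default=true \
dnl                 --with-sbd-sync-default=true \
dnl                 --with-resource-stickiness-default=1
dnl
dnl Each accepted value is baked into config.h via AC_DEFINE_UNQUOTED, and
dnl the non-default choices are advertised in $PCMK_FEATURES.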
dnl --with-* options: directory locations AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins (deprecated) @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata (deprecated) @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users dnl have an older version, they can use our --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) dnl Get default from resource-agents if possible. Otherwise, the default uses dnl /usr/lib rather than libdir because it's determined by the OCF project and dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or dnl such, the OCF agents will be expected in their usual location. However, we dnl do give the user the option to override it. 
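dnl For example, a /usr/local build keeps the OCF tree at its standard spot
dnl unless explicitly overridden (illustrative invocations):
dnl
dnl     ./configure --prefix=/usr/local                # OCF root stays /usr/lib/ocf
dnl     ./configure --with-ocfdir=/usr/local/lib/ocf   # explicit override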
PKG_CHECK_VAR([OCF_ROOT_DIR], [resource-agents], [ocfrootdir], [],
              [OCF_ROOT_DIR="/usr/lib/ocf"])
AC_ARG_WITH([ocfdir],
    [AS_HELP_STRING([--with-ocfdir=DIR],
        m4_normalize([OCF resource agent root directory (advanced option:
                      changing this may break other cluster components
                      unless similarly configured)
                      @<:@value from resource-agents package if available
                      otherwise /usr/lib/ocf@:>@]))],
    [ OCF_ROOT_DIR="$withval" ]
)
AC_SUBST(OCF_ROOT_DIR)
AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"],
                   [OCF root directory for resource agents and libraries])

PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [],
              [OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"])
AC_ARG_WITH([ocfrapath],
    [AS_HELP_STRING([--with-ocfrapath=DIR],
        m4_normalize([OCF resource agent directories (colon-separated) to
                      search
                      @<:@value from resource-agents package if available
                      otherwise OCFDIR/resource.d@:>@]))],
    [ OCF_RA_PATH="$withval" ]
)
AC_SUBST(OCF_RA_PATH)

OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d"
AC_ARG_WITH([ocfrainstalldir],
    [AS_HELP_STRING([--with-ocfrainstalldir=DIR],
        m4_normalize([OCF installation directory for Pacemaker's resource
                      agents @<:@OCFDIR/resource.d@:>@]))],
    [ OCF_RA_INSTALL_DIR="$withval" ]
)
AC_SUBST(OCF_RA_INSTALL_DIR)

dnl Get default from fence-agents if available
PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix],
              [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"],
              [PCMK__FENCE_BINDIR="$sbindir"])
AC_ARG_WITH([fence-bindir],
    [AS_HELP_STRING([--with-fence-bindir=DIR],
        m4_normalize([directory for executable fence agents
                      @<:@value from fence-agents package if available
                      otherwise SBINDIR@:>@]))],
    [ PCMK__FENCE_BINDIR="$withval" ]
)
AC_SUBST(PCMK__FENCE_BINDIR)

dnl --with-* options: non-production testing

AC_ARG_WITH([profiling],
    [AS_HELP_STRING([--with-profiling],
        [disable optimizations, for effective profiling @<:@no@:>@])]
)
yes_no_try "$with_profiling" "no"
with_profiling=$?

AC_ARG_WITH([coverage],
    [AS_HELP_STRING([--with-coverage],
        [disable optimizations, for effective profiling and coverage testing
         @<:@no@:>@])]
)
yes_no_try "$with_coverage" "no"
with_coverage=$?

AC_ARG_WITH([sanitizers],
    [AS_HELP_STRING([--with-sanitizers=...,...],
        [enable SANitizer build, do *NOT* use for production. Only
         ASAN/UBSAN/TSAN are currently supported])],
    [ SANITIZERS="$withval" ],
    [ SANITIZERS="" ])

dnl Environment variable options

AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])

dnl ===============================================
dnl General Processing
dnl ===============================================

AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION",
                   [Version number of this Pacemaker build])

PACKAGE_SERIES=`echo $VERSION | awk -F.
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_PROG_LN_S AC_PROG_MKDIR_P # Check for fatal warning support AS_IF([test $enable_fatal_warnings -ne $DISABLED && test x"$GCC" = x"yes" && cc_supports_flag -Werror], [WERROR="-Werror"], [ WERROR="" AS_CASE([$enable_fatal_warnings], [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])], [$OPTIONAL], [ AC_MSG_NOTICE([Compiler does not support fatal warnings]) enable_fatal_warnings=$DISABLED ]) ]) AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}]) AS_CASE([$INITDIR], [prefix], [INITDIR=$prefix], [""], [ AC_MSG_CHECKING([which init (rc) directory to use]) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do AS_IF([test -d $initdir], [ INITDIR=$initdir break ]) done AC_MSG_RESULT([$INITDIR]) ]) AC_SUBST(INITDIR) dnl Expand values of autoconf-provided directory options expand_path_option prefix expand_path_option exec_prefix expand_path_option bindir expand_path_option sbindir expand_path_option libexecdir expand_path_option datadir expand_path_option sysconfdir expand_path_option sharedstatedir expand_path_option localstatedir expand_path_option libdir expand_path_option includedir expand_path_option oldincludedir expand_path_option infodir expand_path_option mandir dnl Home-grown variables expand_path_option localedir "${datadir}/locale" AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs]) AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"]) expand_path_option runstatedir "${localstatedir}/run" AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) expand_path_option INITDIR AC_DEFINE_UNQUOTED([PCMK__LSB_INIT_DIR], ["$INITDIR"], [Location for LSB init scripts]) expand_path_option docdir "${datadir}/doc/${PACKAGE}-${VERSION}" AC_SUBST(docdir) expand_path_option CONFIGDIR "${sysconfdir}/sysconfig" AC_SUBST(CONFIGDIR) expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf" AC_SUBST(PCMK__COROSYNC_CONF) expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker" AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles" AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) expand_path_option PCMK__FENCE_BINDIR AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR", [Location for executable fence agents]) expand_path_option OCF_RA_PATH AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"], [OCF directories to search for resource agents ]) AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [], [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])]) AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) AC_SUBST(PCMK_GNUTLS_PRIORITIES) AS_IF([test x"${BUG_URL}" = x""], [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"]) AC_SUBST(BUG_URL) AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"], [Where bugs should be reported]) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR localedir do dirname=`eval echo '${'${j}'}'` AS_IF([test ! 
-d "$dirname"], [AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])]) done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([HAVE_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([HAVE_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include ]]) ], [[#define _GNU_SOURCE #include ]]) ], [], [[#include ]]) ]) AS_IF([test -z "${us_auth}"], [ # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([HAVE_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([HAVE_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ AC_MSG_FAILURE([No way to authenticate a Unix socket peer]) ], [[#include ]]) ]) ]) dnl OS-based decision-making is poor autotools practice; feature-based dnl mechanisms are strongly preferred. Keep this section to a bare minimum; dnl regard as a "necessary evil". INIT_EXT="" PROCFS=0 dnl Solaris and some *BSD versions support procfs but not files we need AS_CASE(["$host_os"], [*bsd*], [INIT_EXT=".sh"], [*linux*], [PROCFS=1], [darwin*], [ LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ]) AC_SUBST(INIT_EXT) AM_CONDITIONAL([SUPPORT_PROCFS], [test $PROCFS -eq 1]) AC_DEFINE_UNQUOTED([HAVE_LINUX_PROCFS], [$PROCFS], [Define to 1 if procfs is supported]) AS_CASE(["$host_cpu"], [ppc64|powerpc64], [ AS_CASE([$CFLAGS], [*powerpc64*], [], [*], [AS_IF([test x"$GCC" = x"yes"], [CFLAGS="$CFLAGS -m64"]) ]) ]) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) dnl Require a minimum Python version AM_PATH_PYTHON([3.4]) AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl Bash is needed for building man pages and running regression tests. dnl BASH is already an environment variable, so use something else. 
AC_PATH_PROG([BASH_PATH], [bash]) AS_IF([test x"${BASH_PATH}" != x""], [], [AC_MSG_FAILURE([Could not find required build tool bash])]) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") AS_IF([test x"${HELP2MAN}" != x""], [PCMK_FEATURES="$PCMK_FEATURES generated-manpages"]) MANPAGE_XSLT="" AS_IF([test x"${XSLTPROC}" != x""], [ AC_MSG_CHECKING([for DocBook-to-manpage transform]) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') AS_IF([test x"${MANPAGE_XSLT}" = x""], [ DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d 2>/dev/null | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS} do AS_IF([test -f "${d}/${XSLT}"], [ MANPAGE_XSLT="${d}/${XSLT}" break ]) done ]) ]) AC_MSG_RESULT([$MANPAGE_XSLT]) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") AS_IF([test x"${MANPAGE_XSLT}" != x""], [PCMK_FEATURES="$PCMK_FEATURES agent-manpages"]) AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) AS_IF([test x"${ASCIIDOC_CONV}" != x""], [PCMK_FEATURES="$PCMK_FEATURES ascii-docs"]) AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""]) AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" AS_IF([test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH"], [ $GETOPT_PATH -T >/dev/null 2>/dev/null AS_IF([test $? -eq 4], [break]) ]) GETOPT_PATH="" done IFS=$IFS_orig AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])], [ AC_MSG_RESULT([no]) AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt]) ]) AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... 
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available)

PKG_CHECK_MODULES([UUID], [uuid],
                  [CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}"
                   LIBS="${LIBS} ${UUID_LIBS}"])

AC_CHECK_FUNCS([sched_setscheduler])
AS_IF([test x"$ac_cv_func_sched_setscheduler" != x"yes"],
      [PC_LIBS_RT=""],
      [PC_LIBS_RT="-lrt"])
AC_SUBST(PC_LIBS_RT)

# Require minimum glib version
PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0],
                  [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}"
                   LIBS="${LIBS} ${GLIB_LIBS}"])

# Check whether high-resolution sleep function is available
AC_CHECK_FUNCS([nanosleep usleep])

#
# Where is dlopen?
#
AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"],
      [LIBADD_DL=""],
      [test x"$ac_cv_lib_dl_dlopen" = x"yes"],
      [LIBADD_DL=-ldl],
      [LIBADD_DL=${lt_cv_dlopen_libs}])

PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
                  [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
                   LIBS="${LIBS} ${LIBXML2_LIBS}"])

REQUIRE_LIB([xslt], [xsltApplyStylesheet])

dnl ========================================================================
dnl Headers
dnl ========================================================================

# Some distributions insert #warnings into deprecated headers. If we will
# enable fatal warnings for the build, then enable them for the header checks
# as well, otherwise the build could fail even though the header check
# succeeds. (We should probably be doing this in more places.)
cc_temp_flags "$CFLAGS $WERROR"

# Optional headers (inclusion of these should be conditional in C code)
AC_CHECK_HEADERS([linux/swab.h])
AC_CHECK_HEADERS([stddef.h])
AC_CHECK_HEADERS([sys/signalfd.h])
AC_CHECK_HEADERS([uuid/uuid.h])
AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h])

# Required headers
REQUIRE_HEADER([arpa/inet.h])
REQUIRE_HEADER([ctype.h])
REQUIRE_HEADER([dirent.h])
REQUIRE_HEADER([errno.h])
REQUIRE_HEADER([glib.h])
REQUIRE_HEADER([grp.h])
REQUIRE_HEADER([limits.h])
REQUIRE_HEADER([netdb.h])
REQUIRE_HEADER([netinet/in.h])
REQUIRE_HEADER([netinet/ip.h], [
    #include <sys/types.h>
    #include <netinet/in.h>
])
REQUIRE_HEADER([pwd.h])
REQUIRE_HEADER([signal.h])
REQUIRE_HEADER([stdio.h])
REQUIRE_HEADER([stdlib.h])
REQUIRE_HEADER([string.h])
REQUIRE_HEADER([strings.h])
REQUIRE_HEADER([sys/ioctl.h])
REQUIRE_HEADER([sys/param.h])
REQUIRE_HEADER([sys/reboot.h])
REQUIRE_HEADER([sys/resource.h])
REQUIRE_HEADER([sys/socket.h])
REQUIRE_HEADER([sys/stat.h])
REQUIRE_HEADER([sys/time.h])
REQUIRE_HEADER([sys/types.h])
REQUIRE_HEADER([sys/utsname.h])
REQUIRE_HEADER([sys/wait.h])
REQUIRE_HEADER([time.h])
REQUIRE_HEADER([unistd.h])
REQUIRE_HEADER([libxml/xpath.h])
REQUIRE_HEADER([libxslt/xslt.h])

cc_restore_flags

AC_CHECK_FUNCS([uuid_unparse], [],
               [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])])

AC_CACHE_CHECK([whether __progname and __progname_full are available],
               [pf_cv_var_progname],
               [AC_LINK_IFELSE(
                   [AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
                                    [[__progname = "foo";
                                      __progname_full = "foo bar";]])],
                   [pf_cv_var_progname="yes"],
                   [pf_cv_var_progname="no"]
               )]
              )
AS_IF([test x"$pf_cv_var_progname" = x"yes"],
      [AC_DEFINE(HAVE_PROGNAME,1,[Define to 1 if processes can change their name])])

dnl ========================================================================
dnl Generic declarations
dnl ========================================================================

AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [],
               [[#include <time.h>]])
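dnl The cache variable set by the check above is consumed further down; for
dnl instance, the nagios and systemd decisions both test it:
dnl
dnl     AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
dnl           [with_nagios=$DISABLED], [with_nagios=$REQUIRED])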
dnl ========================================================================
dnl Unit test declarations
dnl ========================================================================

AC_CHECK_DECLS([assert_float_equal], [], [], [[
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
]])

cc_temp_flags "$CFLAGS -Wl,--wrap=uname"
WRAPPABLE_UNAME="no"
AC_MSG_CHECKING([if uname() can be wrapped])
AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <sys/utsname.h>
int __wrap_uname(struct utsname *buf) {
    return 100;
}
int main(int argc, char **argv) {
    struct utsname x;
    return uname(&x) == 100 ? 0 : 1;
}
]])],
    [ WRAPPABLE_UNAME="yes" ],
    [ WRAPPABLE_UNAME="no"])
AC_MSG_RESULT([$WRAPPABLE_UNAME])
AM_CONDITIONAL([WRAPPABLE_UNAME], [test x"$WRAPPABLE_UNAME" = x"yes"])
dnl (a consumption sketch for WRAPPABLE_UNAME appears just before the
dnl ncurses section below)
cc_restore_flags

dnl ========================================================================
dnl Structures
dnl ========================================================================

AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
AC_CHECK_MEMBER([struct dirent.d_type],
    AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
    [#include <dirent.h>])

dnl ========================================================================
dnl Functions
dnl ========================================================================

REQUIRE_FUNC([getopt])
REQUIRE_FUNC([setenv])
REQUIRE_FUNC([unsetenv])
REQUIRE_FUNC([vasprintf])

AC_CACHE_CHECK(whether sscanf supports %m,
               pf_cv_var_sscanf,
               AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <stdio.h>
const char *s = "some-command-line-arg";
int main(int argc, char **argv) {
    char *name = NULL;
    int n = sscanf(s, "%ms", &name);
    return n == 1 ? 0 : 1;
}
]])],
                             pf_cv_var_sscanf="yes",
                             pf_cv_var_sscanf="no",
                             pf_cv_var_sscanf="no"))

AS_IF([test x"$pf_cv_var_sscanf" = x"yes"],
      [AC_DEFINE([HAVE_SSCANF_M], [1],
                 [Define to 1 if sscanf %m modifier is available])])

dnl ========================================================================
dnl bzip2
dnl ========================================================================

REQUIRE_HEADER([bzlib.h])
REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress])

dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================

AC_MSG_CHECKING([for sighandler_t])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]], [[sighandler_t *f;]])],
                  [
                      AC_MSG_RESULT([yes])
                      AC_DEFINE([HAVE_SIGHANDLER_T], [1],
                                [Define to 1 if sighandler_t is available])
                  ],
                  [AC_MSG_RESULT([no])])
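dnl A sketch (assumed, not verbatim) of how the WRAPPABLE_UNAME conditional
dnl from the unit-test checks above is meant to be consumed by a cmocka
dnl test's Makefile.am, so that __wrap_uname() replaces the real uname():
dnl
dnl     if WRAPPABLE_UNAME
dnl     my_test_LDFLAGS = -Wl,--wrap=uname
dnl     endif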
dnl ========================================================================
dnl ncurses
dnl ========================================================================

dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h])

dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
PC_NAME_CURSES=""
PC_LIBS_CURSES=""
AS_IF([test x"$ac_cv_header_ncurses_h" = x"yes"], [
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
    PC_NAME_CURSES="ncurses"
])

AS_IF([test x"$ac_cv_header_ncurses_ncurses_h" = x"yes"], [
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
    PC_NAME_CURSES="ncurses"
])

dnl Only look for non-n-library if there was no n-library.
AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_h" = x"yes"], [
    AC_CHECK_LIB(curses, printw,
                 [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
    PC_LIBS_CURSES="$CURSESLIBS"
])

dnl Only look for non-n-library if there was no n-library.
AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_curses_h" = x"yes"], [
    AC_CHECK_LIB(curses, printw,
                 [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
    PC_LIBS_CURSES="$CURSESLIBS"
])

AS_IF([test x"$CURSESLIBS" != x""],
      [PCMK_FEATURES="$PCMK_FEATURES ncurses"])

dnl Check for printw() prototype compatibility
AS_IF([test x"$CURSESLIBS" != x"" && cc_supports_flag -Wcast-qual], [
    ac_save_LIBS=$LIBS
    LIBS="$CURSESLIBS"

    # avoid broken test because of hardened build environment in Fedora 23+
    # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
    # - https://bugzilla.redhat.com/1297985
    AS_IF([cc_supports_flag -fPIC],
          [cc_temp_flags "-Wcast-qual $WERROR -fPIC"],
          [cc_temp_flags "-Wcast-qual $WERROR"])

    AC_MSG_CHECKING([whether curses library is compatible])
    AC_LINK_IFELSE(
        [AC_LANG_PROGRAM([
#if defined(HAVE_NCURSES_H)
# include <ncurses.h>
#elif defined(HAVE_NCURSES_NCURSES_H)
# include <ncurses/ncurses.h>
#elif defined(HAVE_CURSES_H)
# include <curses.h>
#endif
                         ],
                         [printw((const char *)"Test");]
        )],
        [AC_MSG_RESULT([yes])],
        [
            AC_MSG_RESULT([no])
            AC_MSG_WARN(m4_normalize([Disabling curses because the printw()
                        function of your (n)curses library is old.
If you wish to enable curses, update to a newer version (ncurses 5.4 or later is recommended, available from https://invisible-island.net/ncurses/) ])) AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1], [Define to 1 if curses library has incompatible printw()]) ] ) LIBS=$ac_save_LIBS cc_restore_flags ]) AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== CFLAGS_ORIG="$CFLAGS" AS_IF([test $with_coverage -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ] ) AS_IF([test $with_profiling -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES profile" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin" dnl CFLAGS="$CFLAGS -fno-inline-functions" dnl CFLAGS="$CFLAGS -fno-default-inline" dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once" dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed \ -e 's/-O.\ //g' \ -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \ -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG]) AC_MSG_NOTICE([CFLAGS after: $CFLAGS]) ] ) AC_DEFINE_UNQUOTED([SUPPORT_PROFILING], [$with_profiling], [Support profiling]) AM_CONDITIONAL([BUILD_PROFILING], [test "$with_profiling" = "$REQUIRED"]) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== PKG_CHECK_MODULES(libqb, libqb >= 0.17) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 2.0.5+ (2022-03) AC_CHECK_FUNCS([qb_ipcc_connect_async]) dnl libqb 2.0.2+ (2020-10) AC_CHECK_FUNCS([qb_ipcc_auth_get]) dnl libqb 2.0.0+ (2020-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available AS_IF([test x"$cross_compiling" != x"yes"], [CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"]) AC_CHECK_HEADERS([stonith/stonith.h], [ AC_CHECK_LIB([pils], [PILLoadPlugin]) AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel]) PCMK_FEATURES="$PCMK_FEATURES lha" ]) AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"], [Directory Pacemaker daemons should change to (without systemd, core files will go here)]) AC_SUBST(CRM_CORE_DIR) AS_IF([test x"${CRM_DAEMON_USER}" = x""], [CRM_DAEMON_USER="hacluster"]) AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) AS_IF([test x"${CRM_DAEMON_GROUP}" = x""], [CRM_DAEMON_GROUP="haclient"]) AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker 
daemons as)
AC_SUBST(CRM_DAEMON_GROUP)

CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR",
                   Location to store directory produced by Pacemaker daemons)
AC_SUBST(CRM_PACEMAKER_DIR)

CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR",
                   Where to keep blackbox dumps)
AC_SUBST(CRM_BLACKBOX_DIR)

PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR",
                   Where to keep scheduler outputs)
AC_SUBST(PE_STATE_DIR)

CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR",
                   Where to keep configuration files)
AC_SUBST(CRM_CONFIG_DIR)

CRM_DAEMON_DIR="${libexecdir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR",
                   Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)

CRM_STATE_DIR="${runstatedir}/crm"
AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
                   [Where to keep state files and sockets])
AC_SUBST(CRM_STATE_DIR)

CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR",
                   Where resource agents should keep state files)
AC_SUBST(CRM_RSCTMP_DIR)

PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR",
                   Where to keep configuration files like authkey)
AC_SUBST(PACEMAKER_CONFIG_DIR)

AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries])

AC_PATH_PROGS(GIT, git false)

AC_MSG_CHECKING([build version])
BUILD_VERSION=$Format:%h$
AS_IF([test $BUILD_VERSION != ":%h$"],
      [AC_MSG_RESULT([$BUILD_VERSION (archive hash)])],
      [test -x $GIT && test -d .git], [
          BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
          AC_MSG_RESULT([$BUILD_VERSION (git hash)])
      ], [
          # The current directory name makes a reasonable default
          # Most generated archives will include the hash or tag
          BASE=`basename $PWD`
          BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
          AC_MSG_RESULT([$BUILD_VERSION (directory name)])
      ])
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)

HAVE_dbus=1
PKG_CHECK_MODULES([DBUS], [dbus-1],
                  [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
                  [HAVE_dbus=0])
AC_DEFINE_UNQUOTED(HAVE_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
dnl libdbus 1.5.12+ (2012-03) / 1.6.0+ (2012-06)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
AS_IF([test $HAVE_dbus = 0],
      [PC_NAME_DBUS=""],
      [PC_NAME_DBUS="dbus-1"])
AC_SUBST(PC_NAME_DBUS)

AS_CASE([$enable_systemd],
        [$REQUIRED], [
            AS_IF([test $HAVE_dbus = 0],
                  [AC_MSG_FAILURE([Cannot support systemd resources without DBus])])
            AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
                  [AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])])
            AS_IF([check_systemdsystemunitdir], [],
                  [AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])])
        ],
        [$OPTIONAL], [
            AS_IF([test $HAVE_dbus = 0 \
                   || test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
                  [enable_systemd=$DISABLED], [
                      AC_MSG_CHECKING([for systemd version (using dbus-send)])
                      ret=$({ dbus-send --system --print-reply \
                                  --dest=org.freedesktop.systemd1 \
                                  /org/freedesktop/systemd1 \
                                  org.freedesktop.DBus.Properties.Get \
                                  string:org.freedesktop.systemd1.Manager \
                                  string:Version 2>/dev/null \
                              || echo "version unavailable"; } | tail -n1)
                      # sanitize output a bit (interested just in value, not type),
                      # ret is intentionally unenquoted so as to normalize whitespace
                      ret=$(echo ${ret} | cut -d' ' -f2-)
                      AC_MSG_RESULT([${ret}])
                      AS_IF([test
x"$ret" != x"unavailable" \ || systemctl --version 2>/dev/null | grep -q systemd], [ AS_IF([check_systemdsystemunitdir], [enable_systemd=$REQUIRED], [enable_systemd=$DISABLED]) ], [enable_systemd=$DISABLED] ) ]) ], ) AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])], [ AC_MSG_RESULT([yes]) PCMK_FEATURES="$PCMK_FEATURES systemd" ] ) AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd], [Support systemd resources]) AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED]) AC_SUBST(SUPPORT_SYSTEMD) AS_CASE([$enable_upstart], [$REQUIRED], [ AS_IF([test $HAVE_dbus = 0], [AC_MSG_FAILURE([Cannot support Upstart resources without DBus])]) ], [$OPTIONAL], [ AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED], [ AC_MSG_CHECKING([for Upstart version (using dbus-send)]) ret=$({ dbus-send --system --print-reply \ --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "version unavailable"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) AS_IF([test x"$ret" != x"unavailable" \ || initctl --version 2>/dev/null | grep -q upstart], [enable_upstart=$REQUIRED], [enable_upstart=$DISABLED] ) ]) ], ) AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])], [ AC_MSG_RESULT([yes]) PCMK_FEATURES="$PCMK_FEATURES upstart" ] ) AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart], [Support Upstart resources]) AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED]) AC_SUBST(SUPPORT_UPSTART) AS_CASE([$with_nagios], [$REQUIRED], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])]) ], [$OPTIONAL], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [with_nagios=$DISABLED], [with_nagios=$REQUIRED]) ] ) AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"]) AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins]) AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED]) AS_IF([test x"$NAGIOS_PLUGIN_DIR" = x""], [NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"]) AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) AS_IF([test x"$NAGIOS_METADATA_DIR" = x""], [NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"]) AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== COROSYNC_LIBS="" AS_CASE([$with_corosync], [$REQUIRED], [ # These will be fatal if unavailable PKG_CHECK_MODULES([cpg], [libcpg]) PKG_CHECK_MODULES([cfg], [libcfg]) PKG_CHECK_MODULES([cmap], [libcmap]) PKG_CHECK_MODULES([quorum], [libquorum]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common]) ] [$OPTIONAL], [ PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED]) 
PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED]) AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED]) ] ) AS_IF([test $with_corosync -ne $DISABLED], [ AC_MSG_CHECKING([for Corosync 2 or later]) AC_MSG_RESULT([yes]) CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" CPPFLAGS="$CPPFLAGS `$PKG_CONFIG --cflags-only-I corosync`" COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-ge-2" dnl Shutdown tracking added (back) to corosync Jan 2021 saved_LIBS="$LIBS" LIBS="$LIBS $COROSYNC_LIBS" AC_CHECK_FUNCS([corosync_cfg_trackstart]) LIBS="$saved_LIBS" ] ) AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync], [Support the Corosync messaging and membership layer]) AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED]) AC_SUBST([SUPPORT_COROSYNC]) dnl dnl Cluster stack - Sanity dnl AS_IF([test x"$STACKS" != x""], [AC_MSG_NOTICE([Supported stacks:${STACKS}])], [AC_MSG_FAILURE([At least one cluster stack must be supported])]) PCMK_FEATURES="${PCMK_FEATURES}${STACKS}" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== AS_IF([test $with_cibsecrets -ne $DISABLED], [ with_cibsecrets=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED([LRM_CIBSECRETS_DIR], ["$LRM_CIBSECRETS_DIR"], [Location for CIB secrets]) AC_SUBST([LRM_CIBSECRETS_DIR]) ] ) AC_DEFINE_UNQUOTED([SUPPORT_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets]) AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED]) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl Require GnuTLS >=2.12.0 (2011-03) for Pacemaker Remote support PC_NAME_GNUTLS="" AS_CASE([$with_gnutls], [$REQUIRED], [ REQUIRE_LIB([gnutls], [gnutls_sec_param_to_pk_bits]) REQUIRE_HEADER([gnutls/gnutls.h]) ], [$OPTIONAL], [ AC_CHECK_LIB([gnutls], [gnutls_sec_param_to_pk_bits], [], [with_gnutls=$DISABLED]) AC_CHECK_HEADERS([gnutls/gnutls.h], [], [with_gnutls=$DISABLED]) ] ) AS_IF([test $with_gnutls -ne $DISABLED], [ PC_NAME_GNUTLS="gnutls" PCMK_FEATURES="$PCMK_FEATURES remote" ] ) AC_SUBST([PC_NAME_GNUTLS]) AM_CONDITIONAL([BUILD_REMOTE], [test $with_gnutls -ne $DISABLED]) # --- ASAN/UBSAN/TSAN (see man gcc) --- # when using SANitizers, we need to pass the -fsanitize.. # to both CFLAGS and LDFLAGS. The CFLAGS/LDFLAGS must be # specified as first in the list or there will be runtime # issues (for example user has to LD_PRELOAD asan for it to work # properly). 
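# For example (illustrative):
#
#     ./configure --with-sanitizers=asan,ubsan
#
# expands below to SANITIZERS_CFLAGS="-fsanitize=address -fsanitize=undefined"
# (with matching SANITIZERS_LDFLAGS), which are later placed at the front of
# CFLAGS/LDFLAGS as described above.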
AS_IF([test -n "${SANITIZERS}"], [ SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g') for SANITIZER in $SANITIZERS do AS_CASE([$SANITIZER], [asan|ASAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" PCMK_FEATURES="$PCMK_FEATURES asan" REQUIRE_LIB([asan],[main]) ], [ubsan|UBSAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" PCMK_FEATURES="$PCMK_FEATURES ubsan" REQUIRE_LIB([ubsan],[main]) ], [tsan|TSAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" PCMK_FEATURES="$PCMK_FEATURES tsan" REQUIRE_LIB([tsan],[main]) ]) done ]) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. AS_IF([export | fgrep " CFLAGS=" > /dev/null], [ SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS ]) CC_EXTRAS="" AS_IF([test x"$GCC" != x"yes"], [CFLAGS="$CFLAGS -g"], [ CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags AS_IF([cc_supports_flag "-Wformat-nonliteral"], [gcc_format_nonliteral=yes], [gcc_format_nonliteral=no]) # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). 
EXTRA_FLAGS="-fgnu89-inline" EXTRA_FLAGS="$EXTRA_FLAGS -Wall" EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return" EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast" EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align" EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement" EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels" EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal" EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security" EXTRA_FLAGS="$EXTRA_FLAGS -Wimplicit-fallthrough" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations" EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing" EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith" EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings" EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable" EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char" AS_IF([test x"$gcc_diagnostic_push_pull" = x"yes"], [ AC_DEFINE([HAVE_FORMAT_NONLITERAL], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" ], [test x"$gcc_format_nonliteral" = x"yes"], [EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"]) # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"]) done AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}]) ]) dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl pacemaker-based, pacemaker-fenced, pacemaker-remoted, dnl pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. 
dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl AS_IF([test $enable_hardening -eq $OPTIONAL], [ AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0], [enable_hardening=$REQUIRED], [AC_MSG_NOTICE([Hardening: using custom flags from environment])] ) ], [ unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB ] ) AS_CASE([$enable_hardening], [$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])], [$REQUIRED], [ CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 stackprot="none" # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" relro=1 ]) # daemons: PIE for both CFLAGS and LDFLAGS AS_IF([cc_supports_flag -fPIE], [ flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [ CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE" LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" pie=1 ]) ] ) # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries AS_IF([test "${relro}" = 1 && test "${pie}" = 1], [ flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" bindnow=1 ]) ] ) AS_IF([test "${bindnow}" = 1], [ flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" ]) ]) # universal: prefer strong > all > default stack protector if possible flag= AS_IF([cc_supports_flag -fstack-protector-strong], [ flag="-fstack-protector-strong" stackprot="strong" ], [cc_supports_flag -fstack-protector-all], [ flag="-fstack-protector-all" stackprot="all" ], [cc_supports_flag -fstack-protector], [ flag="-fstack-protector" stackprot="default" ] ) AS_IF([test -n "${flag}"], [CC_EXTRAS="${CC_EXTRAS} ${flag}"]) # universal: enable stack clash protection if possible AS_IF([cc_supports_flag -fstack-clash-protection], [ CC_EXTRAS="${CC_EXTRAS} -fstack-clash-protection" AS_IF([test "${stackprot}" = "none"], [stackprot="clash-only"], [stackprot="${stackprot}+clash"] ) ] ) # Log a summary AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test x"${stackprot}" != x"none"], [AC_MSG_NOTICE(m4_normalize([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${stackprot}])) ], [AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])] ) ], ) CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
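dnl
dnl Illustration of the flag layering above (symbolic, assuming gcc):
dnl
dnl     NON_FATAL_CFLAGS = $SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS
dnl     CFLAGS           = $NON_FATAL_CFLAGS $WERROR  (when fatal warnings
dnl                                                    are enabled below)
dnl
dnl NON_FATAL_CFLAGS is substituted separately, presumably so that Makefiles
dnl can compile sources whose warnings are beyond our control without having
dnl -Werror abort the build.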
dnl dnl AS_IF([test $enable_fatal_warnings -ne $DISABLED], [ AC_MSG_NOTICE([Enabling fatal compiler warnings]) CFLAGS="$CFLAGS $WERROR" ]) AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output AS_IF([test $enable_quiet -ne $DISABLED], [ AC_MSG_NOTICE([Suppressing make details]) QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant ], [ QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" ] ) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" # Make features list available (sorted alphabetically, without leading space) PCMK_FEATURES=`echo "$PCMK_FEATURES" | sed -e 's/^ //' -e 's/ /\n/g' | sort | xargs` AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) dnl Files we output that need to be executable CONFIG_FILES_EXEC([agents/ocf/ClusterMon], [agents/ocf/Dummy], [agents/ocf/HealthCPU], [agents/ocf/HealthIOWait], [agents/ocf/HealthSMART], [agents/ocf/Stateful], [agents/ocf/SysInfo], [agents/ocf/attribute], [agents/ocf/controld], [agents/ocf/ifspeed], [agents/ocf/o2cb], [agents/ocf/ping], [agents/ocf/remote], [agents/stonith/fence_legacy], [agents/stonith/fence_watchdog], [cts/cts-attrd], [cts/cts-cli], [cts/cts-exec], [cts/cts-fencing], [cts/cts-regression], [cts/cts-scheduler], [cts/lxc_autogen.sh], [cts/benchmark/clubench], [cts/lab/CTSlab.py], [cts/lab/OCFIPraTest.py], [cts/lab/cluster_test], [cts/lab/cts], [cts/lab/cts-log-watcher], [cts/support/LSBDummy], [cts/support/cts-support], [cts/support/fence_dummy], [cts/support/pacemaker-cts-dummyd], [doc/abi-check], [maint/bumplibs], [tools/cluster-clean], [tools/cluster-helper], [tools/cluster-init], [tools/crm_failcount], [tools/crm_master], [tools/crm_report], [tools/crm_standby], [tools/cibsecret], [tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ agents/Makefile \ agents/alerts/Makefile \ agents/ocf/Makefile \ agents/stonith/Makefile \ cts/Makefile \ cts/benchmark/Makefile \ cts/lab/Makefile \ cts/scheduler/Makefile \ cts/scheduler/dot/Makefile \ cts/scheduler/exp/Makefile \ cts/scheduler/scores/Makefile \ cts/scheduler/stderr/Makefile \ cts/scheduler/summary/Makefile \ cts/scheduler/xml/Makefile \ cts/support/Makefile \ cts/support/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker.combined.upstart \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/sphinx/Makefile \ etc/Makefile \ etc/init.d/pacemaker \ etc/logrotate.d/pacemaker \ etc/sysconfig/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ lib/Makefile \ lib/cib/Makefile \ lib/cluster/Makefile \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/acl/Makefile \ 
                lib/common/tests/agents/Makefile \
                lib/common/tests/cmdline/Makefile \
                lib/common/tests/flags/Makefile \
                lib/common/tests/health/Makefile \
                lib/common/tests/io/Makefile \
                lib/common/tests/iso8601/Makefile \
                lib/common/tests/lists/Makefile \
                lib/common/tests/nvpair/Makefile \
                lib/common/tests/operations/Makefile \
                lib/common/tests/options/Makefile \
                lib/common/tests/output/Makefile \
                lib/common/tests/procfs/Makefile \
                lib/common/tests/results/Makefile \
                lib/common/tests/scores/Makefile \
                lib/common/tests/strings/Makefile \
                lib/common/tests/utils/Makefile \
                lib/common/tests/xml/Makefile \
                lib/common/tests/xpath/Makefile \
                lib/fencing/Makefile \
                lib/gnu/Makefile \
                lib/libpacemaker.pc \
                lib/lrmd/Makefile \
                lib/pacemaker/Makefile \
                lib/pacemaker.pc \
                lib/pacemaker-cib.pc \
                lib/pacemaker-cluster.pc \
                lib/pacemaker-fencing.pc \
                lib/pacemaker-lrmd.pc \
                lib/pacemaker-service.pc \
                lib/pacemaker-pe_rules.pc \
                lib/pacemaker-pe_status.pc \
                lib/pengine/Makefile \
                lib/pengine/tests/Makefile \
                lib/pengine/tests/native/Makefile \
                lib/pengine/tests/rules/Makefile \
                lib/pengine/tests/status/Makefile \
                lib/pengine/tests/unpack/Makefile \
                lib/pengine/tests/utils/Makefile \
                lib/services/Makefile \
                maint/Makefile \
                po/Makefile.in \
                python/Makefile \
                python/setup.py \
                python/pacemaker/Makefile \
                python/pacemaker/_cts/Makefile \
+               python/pacemaker/_cts/tests/Makefile \
                python/pacemaker/buildoptions.py \
                python/tests/Makefile \
                replace/Makefile \
                rpm/Makefile \
                tests/Makefile \
                tools/Makefile \
                tools/crm_mon.service \
                tools/crm_mon.upstart \
                tools/report.collector \
                tools/report.common \
                xml/Makefile \
                xml/pacemaker-schemas.pc \
)

dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()

dnl *****************
dnl Configure summary
dnl *****************

AC_MSG_NOTICE([])
AC_MSG_NOTICE([$PACKAGE configuration:])
AC_MSG_NOTICE([  Version                  = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_NOTICE([  Features                 = ${PCMK_FEATURES}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([  Prefix                   = ${prefix}])
AC_MSG_NOTICE([  Executables              = ${sbindir}])
AC_MSG_NOTICE([  Man pages                = ${mandir}])
AC_MSG_NOTICE([  Libraries                = ${libdir}])
AC_MSG_NOTICE([  Header files             = ${includedir}])
AC_MSG_NOTICE([  Arch-independent files   = ${datadir}])
AC_MSG_NOTICE([  State information        = ${localstatedir}])
AC_MSG_NOTICE([  System configuration     = ${sysconfdir}])
AC_MSG_NOTICE([  OCF agents               = ${OCF_ROOT_DIR}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([  HA group name            = ${CRM_DAEMON_GROUP}])
AC_MSG_NOTICE([  HA user name             = ${CRM_DAEMON_USER}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([  CFLAGS                   = ${CFLAGS}])
AC_MSG_NOTICE([  CFLAGS_HARDENED_EXE      = ${CFLAGS_HARDENED_EXE}])
AC_MSG_NOTICE([  CFLAGS_HARDENED_LIB      = ${CFLAGS_HARDENED_LIB}])
AC_MSG_NOTICE([  LDFLAGS_HARDENED_EXE     = ${LDFLAGS_HARDENED_EXE}])
AC_MSG_NOTICE([  LDFLAGS_HARDENED_LIB     = ${LDFLAGS_HARDENED_LIB}])
AC_MSG_NOTICE([  Libraries                = ${LIBS}])
AC_MSG_NOTICE([  Stack Libraries          = ${CLUSTERLIBS}])
AC_MSG_NOTICE([  Unix socket auth method  = ${us_auth}])

diff --git a/cts/lab/CTSscenarios.py b/cts/lab/CTSscenarios.py
index 1d17f448ac..9e26d1797e 100644
--- a/cts/lab/CTSscenarios.py
+++ b/cts/lab/CTSscenarios.py
@@ -1,563 +1,562 @@
""" Test scenario classes for Pacemaker's Cluster Test Suite (CTS) """

__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"

import os
import re
+import signal  # needed for os.kill() in PingFest.TearDown() below
import sys
import time

-from cts.CTStests import CTSTest
-
from pacemaker._cts.audits import ClusterAudit
+from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.watcher import LogWatcher


class ScenarioComponent(object):

    def __init__(self, Env):
        self.Env = Env

    def IsApplicable(self):
        '''Return True if this ScenarioComponent is applicable
        in the LabEnvironment given to the constructor.
        '''
        raise ValueError("Abstract Class member (IsApplicable)")

    def SetUp(self, CM):
        '''Set up the given ScenarioComponent'''
        raise ValueError("Abstract Class member (SetUp)")

    def TearDown(self, CM):
        '''Tear down (undo) the given ScenarioComponent'''
        raise ValueError("Abstract Class member (TearDown)")


class Scenario(object):
    (
'''The basic idea of a scenario is that of an ordered list of
ScenarioComponent objects.  Each ScenarioComponent is SetUp() in turn,
and then after the tests have been run, they are torn down using TearDown()
(in reverse order).

A Scenario is applicable to a particular cluster manager iff each
ScenarioComponent is applicable.

A partially set up scenario is torn down if it fails during setup.
''')

    def __init__(self, ClusterManager, Components, Audits, Tests):

        "Initialize the Scenario from the list of ScenarioComponents"

        self.ClusterManager = ClusterManager
        self.Components = Components
        self.Audits = Audits
        self.Tests = Tests

        self.BadNews = None
        self.TestSets = []
        self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
        self.Sets = []

        #self.ns=CTS.NodeStatus(self.Env)

        for comp in Components:
            if not issubclass(comp.__class__, ScenarioComponent):
                raise ValueError("Init value must be a subclass of ScenarioComponent")

        for audit in Audits:
            if not issubclass(audit.__class__, ClusterAudit):
                raise ValueError("Init value must be a subclass of ClusterAudit")

        for test in Tests:
            if not issubclass(test.__class__, CTSTest):
                raise ValueError("Init value must be a subclass of CTSTest")

    def IsApplicable(self):
        (
'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
'''
        )

        for comp in self.Components:
            if not comp.IsApplicable():
                return None

        return True

    def SetUp(self):
        '''Set up the Scenario. Return TRUE on success.'''

        self.ClusterManager.prepare()
        self.audit() # Also detects remote/local log config
        self.ClusterManager.ns.wait_for_all_nodes(self.ClusterManager.Env["nodes"])

        self.audit()
        self.ClusterManager.install_support()

        self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"],
                                  self.ClusterManager.templates.get_patterns("BadNews"),
                                  self.ClusterManager.Env["nodes"],
                                  self.ClusterManager.Env["LogWatcher"],
                                  "BadNews", 0)
        self.BadNews.set_watch() # Call after we've figured out what type of
                                 # log watching to do in LogAudit

        j = 0
        while j < len(self.Components):
            if not self.Components[j].SetUp(self.ClusterManager):
                # OOPS!  We failed.  Tear partial setups down.
                self.audit()
                self.ClusterManager.log("Tearing down partial setup")
                self.TearDown(j)
                return None
            j = j + 1

        self.audit()
        return 1

    def TearDown(self, max=None):
        '''Tear down the Scenario - in reverse order.'''

        if max is None:
            max = len(self.Components)-1
        j = max

        while j >= 0:
            self.Components[j].TearDown(self.ClusterManager)
            j = j - 1

        self.audit()
        self.ClusterManager.install_support("uninstall")

    def incr(self, name):
        '''Increment (or initialize) the value associated with the given name'''
        if name not in self.Stats:
            self.Stats[name] = 0
        self.Stats[name] = self.Stats[name]+1

    def run(self, Iterations):
        self.ClusterManager.oprofileStart()
        try:
            self.run_loop(Iterations)
            self.ClusterManager.oprofileStop()
        except:
            self.ClusterManager.oprofileStop()
            raise

    def run_loop(self, Iterations):
        raise ValueError("Abstract Class member (run_loop)")

    def run_test(self, test, testcount):
        nodechoice = self.ClusterManager.Env.random_node()

-        ret = 1
+        ret = True
        where = ""
        did_run = 0

        self.ClusterManager.instance_errorstoignore_clear()
        self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) + "[" + ("%d" % testcount).rjust(3) + "]")

        starttime = test.set_timer()
        if not test.setup(nodechoice):
            self.ClusterManager.log("Setup failed")
-            ret = 0
+            ret = False

-        elif not test.canrunnow(nodechoice):
+        elif not test.can_run_now(nodechoice):
            self.ClusterManager.log("Skipped")
            test.skipped()

        else:
            did_run = 1
            ret = test(nodechoice)

        if not test.teardown(nodechoice):
            self.ClusterManager.log("Teardown failed")

            if self.ClusterManager.Env["continue"]:
                answer = "Y"
            else:
                try:
                    answer = input('Continue? [nY]')
                except EOFError as e:
                    answer = "n"

            if answer and answer == "n":
                raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))

-            ret = 0
+            ret = False

        stoptime = time.time()
        self.ClusterManager.oprofileSave(testcount)

        elapsed_time = stoptime - starttime
        test_time = stoptime - test.get_timer()

-        if not test["min_time"]:
-            test["elapsed_time"] = elapsed_time
-            test["min_time"] = test_time
-            test["max_time"] = test_time
+        if "min_time" not in test.stats:
+            test.stats["elapsed_time"] = elapsed_time
+            test.stats["min_time"] = test_time
+            test.stats["max_time"] = test_time
        else:
-            test["elapsed_time"] = test["elapsed_time"] + elapsed_time
-            if test_time < test["min_time"]:
-                test["min_time"] = test_time
-            if test_time > test["max_time"]:
-                test["max_time"] = test_time
+            test.stats["elapsed_time"] = test.stats["elapsed_time"] + elapsed_time
+            if test_time < test.stats["min_time"]:
+                test.stats["min_time"] = test_time
+            if test_time > test.stats["max_time"]:
+                test.stats["max_time"] = test_time

        if ret:
            self.incr("success")
            test.log_timer()
        else:
            self.incr("failure")
            self.ClusterManager.statall()
            did_run = 1  # Force the test count to be incremented anyway so test extraction works

-        self.audit(test.errorstoignore())
+        self.audit(test.errors_to_ignore)

        return did_run

    def summarize(self):
        self.ClusterManager.log("****************")
        self.ClusterManager.log("Overall Results:" + repr(self.Stats))
        self.ClusterManager.log("****************")

        stat_filter = {
            "calls":0,
            "failure":0,
            "skipped":0,
            "auditfail":0,
        }

        self.ClusterManager.log("Test Summary")
        for test in self.Tests:
            for key in list(stat_filter.keys()):
-                stat_filter[key] = test.Stats[key]
+                stat_filter[key] = test.stats[key]
            self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))

        self.ClusterManager.debug("Detailed Results")
        for test in self.Tests:
-            self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
+            self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.stats))

        self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")

    def audit(self, LocalIgnore=[]):
        errcount = 0

        ignorelist = []
        ignorelist.append("CTS:")
        ignorelist.extend(LocalIgnore)
        ignorelist.extend(self.ClusterManager.errorstoignore())
        ignorelist.extend(self.ClusterManager.instance_errorstoignore())

        # This makes sure everything is stabilized before starting...
        failed = 0
        for audit in self.Audits:
            if not audit():
                self.ClusterManager.log("Audit " + audit.name + " FAILED.")
                failed += 1
            else:
                self.ClusterManager.debug("Audit " + audit.name + " passed.")

        while errcount < 1000:
            match = None
            if self.BadNews:
                match = self.BadNews.look(0)

            if match:
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        add_err = 0
                if add_err == 1:
                    self.ClusterManager.log("BadNews: " + match)
                    self.incr("BadNews")
                    errcount = errcount + 1
            else:
                break
        else:
            if self.ClusterManager.Env["continue"]:
                answer = "Y"
            else:
                try:
                    answer = input('Big problems. Continue? [nY]')
                except EOFError as e:
                    answer = "n"
            if answer and answer == "n":
                self.ClusterManager.log("Shutting down.")
                self.summarize()
                self.TearDown()
                raise ValueError("Looks like we hit a BadNews jackpot!")

        if self.BadNews:
            self.BadNews.end()
        return failed


class AllOnce(Scenario):
    '''Every Test Once''' # Accessible as __doc__

    def run_loop(self, Iterations):
        testcount = 1
        for test in self.Tests:
            self.run_test(test, testcount)
            testcount += 1


class RandomTests(Scenario):
    '''Random Test Execution'''

    def run_loop(self, Iterations):
        testcount = 1
        while testcount <= Iterations:
            test = self.ClusterManager.Env.random_gen.choice(self.Tests)
            self.run_test(test, testcount)
            testcount += 1


class BasicSanity(Scenario):
    '''Basic Cluster Sanity'''

    def run_loop(self, Iterations):
        testcount = 1
        while testcount <= Iterations:
            test = self.ClusterManager.Env.random_gen.choice(self.Tests)
            self.run_test(test, testcount)
            testcount += 1


class Sequence(Scenario):
    '''Named Tests in Sequence'''

    def run_loop(self, Iterations):
        testcount = 1
        while testcount <= Iterations:
            for test in self.Tests:
                self.run_test(test, testcount)
                testcount += 1


class Boot(Scenario):
    '''Start the Cluster'''

    def run_loop(self, Iterations):
        testcount = 0


class BootCluster(ScenarioComponent):
    (
'''BootCluster is the most basic of ScenarioComponents.
This ScenarioComponent simply starts the cluster manager on all the nodes.
It is fairly robust as it waits for all nodes to come up before starting
as they might have been rebooted or crashed for some reason beforehand.
''')

    def __init__(self, Env):
        pass

    def IsApplicable(self):
        '''BootCluster is so generic it is always Applicable'''
        return True

    def SetUp(self, CM):
        '''Basic Cluster Manager startup.  Start everything'''

        CM.prepare()

        # Clear out the cobwebs ;-)
        CM.stopall(verbose=True, force=True)

        # Now start the Cluster Manager on all the nodes.
        CM.log("Starting Cluster Manager on all nodes.")
        return CM.startall(verbose=True, quick=True)

    def TearDown(self, CM, force=False):
        '''Tear down the ScenarioComponent: stop the cluster manager everywhere'''

        CM.log("Stopping Cluster Manager on all nodes")
        return CM.stopall(verbose=True, force=force)


class LeaveBooted(BootCluster):
    def TearDown(self, CM):
        '''Tear down the ScenarioComponent, leaving the cluster running'''

        CM.log("Leaving Cluster running on all nodes")
        return 1


class PingFest(ScenarioComponent):
    (
'''PingFest does a flood ping to each node in the cluster from the test machine.

If the LabEnvironment Parameter PingSize is set, it will be used as the size
of ping packet requested (via the -s option).  If it is not set, it defaults
to 1024 bytes.

According to the manual page for ping:
    Outputs packets as fast as they come back or one hundred times per
    second, whichever is more.  For every ECHO_REQUEST sent a period ``.''
    is printed, while for every ECHO_REPLY received a backspace is printed.
    This provides a rapid display of how many packets are being dropped.
    Only the super-user may use this option.  This can be very hard on a
    network and should be used with caution.
''')

    def __init__(self, Env):
        self.Env = Env

    def IsApplicable(self):
        '''PingFests are always applicable ;-)
        '''

        return True

    def SetUp(self, CM):
        '''Start the PingFest!'''

        self.PingSize = 1024
        if "PingSize" in list(CM.Env.keys()):
            self.PingSize = CM.Env["PingSize"]

        CM.log("Starting %d byte flood pings" % self.PingSize)

        self.PingPids = []
        for node in CM.Env["nodes"]:
            self.PingPids.append(self._pingchild(node))

        CM.log("Ping PIDs: " + repr(self.PingPids))
        return 1

    def TearDown(self, CM):
        '''Stop it right now!  My ears are pinging!!'''

        for pid in self.PingPids:
            if pid is not None:
                CM.log("Stopping ping process %d" % pid)
                os.kill(pid, signal.SIGKILL)

    def _pingchild(self, node):
        Args = ["ping", "-qfn", "-s", str(self.PingSize), node]

        sys.stdin.flush()
        sys.stdout.flush()
        sys.stderr.flush()
        pid = os.fork()

        if pid < 0:
            self.Env.log("Cannot fork ping child")
            return None
        if pid > 0:
            return pid

        # Otherwise, we're the child process.
        os.execvp("ping", Args)
        self.Env.log("Cannot execvp ping: " + repr(Args))
        sys.exit(1)


class BasicSanityCheck(ScenarioComponent):
    (
'''
''')

    def IsApplicable(self):
        return self.Env["DoBSC"]

    def SetUp(self, CM):
        CM.prepare()

        # Clear out the cobwebs
        self.TearDown(CM)

        # Now start the Cluster Manager on all the nodes.
        CM.log("Starting Cluster Manager on BSC node(s).")
        return CM.startall()

    def TearDown(self, CM):
        CM.log("Stopping Cluster Manager on BSC node(s).")
        return CM.stopall()


class Benchmark(ScenarioComponent):
    (
'''
''')

    def IsApplicable(self):
        return self.Env["benchmark"]

    def SetUp(self, CM):
        CM.prepare()

        # Clear out the cobwebs
        self.TearDown(CM, force=True)

        # Now start the Cluster Manager on all the nodes.
        CM.log("Starting Cluster Manager on all node(s).")
        return CM.startall()

    def TearDown(self, CM, force=False):
        CM.log("Stopping Cluster Manager on all node(s).")
        return CM.stopall(force=force)


class RollingUpgrade(ScenarioComponent):
    (
'''
Test a rolling upgrade between two versions of the stack
''')

    def __init__(self, Env):
        self.Env = Env
        self.CM = None  # set by SetUp()

    def IsApplicable(self):
        if not self.Env["rpm-dir"]:
            return None
        if not self.Env["current-version"]:
            return None
        if not self.Env["previous-version"]:
            return None

        return True

    def install(self, node, version):
        target_dir = "/tmp/rpm-%s" % version
        src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)

        self.CM.rsh(node, "mkdir -p %s" % target_dir)
        rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
        self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))

        return 1

    def upgrade(self, node):
        return self.install(node, self.CM.Env["current-version"])

    def downgrade(self, node):
        return self.install(node, self.CM.Env["previous-version"])

    def SetUp(self, CM):
        print(repr(self)+"prepare")

        self.CM = CM
        CM.prepare()

        # Clear out the cobwebs
        CM.stopall(force=True)

        CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])

        for node in self.Env["nodes"]:
            if not self.downgrade(node):
                CM.log("Couldn't downgrade %s" % node)
                return None

        return 1

    def TearDown(self, CM):
        # Stop everything
        CM.log("Stopping Cluster Manager on Upgrade nodes.")
        CM.stopall()

        CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
        for node in self.Env["nodes"]:
            if not self.upgrade(node):
                CM.log("Couldn't upgrade %s" % node)
                return None

        return 1

diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py
index 8d4beb2b35..5105abe1ce 100644
--- a/cts/lab/CTStests.py
+++ b/cts/lab/CTStests.py
@@ -1,3178 +1,2290 @@
""" Test-specific classes for Pacemaker's Cluster Test Suite (CTS) """

__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"

#
# SPECIAL NOTE:
#
#       Tests may NOT implement any cluster-manager-specific code in them.
#       EXTEND the ClusterManager object to provide the base capabilities
#       the test needs if you need to do something that the current CM
#       classes do not.  Otherwise you screw up the whole point of the
#       object structure in CTS.
#
#       Thank you.
#

import os
import re
import time
-import subprocess
import tempfile

from stat import *

from pacemaker import BuildOptions
from pacemaker._cts.CTS import NodeStatus
-from pacemaker._cts.audits import AuditConstraint, AuditResource
-from pacemaker._cts.environment import EnvFactory
-from pacemaker._cts.logging import LogFactory
-from pacemaker._cts.patterns import PatternSelector
-from pacemaker._cts.remote import RemoteFactory
-from pacemaker._cts.watcher import LogWatcher
+from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.tests import CTSTest, RemoteDriver, SimulStartLite, SimulStopLite, StartTest, StopTest
+from pacemaker._cts.timer import Timer

AllTestClasses = [ ]


-class CTSTest(object):
-    '''
-    A Cluster test.
-    We implement the basic set of properties and behaviors for a generic
-    cluster test.
-
-    Cluster tests track their own statistics.
-    We keep each of the kinds of counts we track as separate {name,value}
-    pairs.
- ''' - - def __init__(self, cm): - #self.name="the unnamed test" - self.Stats = {"calls":0 - , "success":0 - , "failure":0 - , "skipped":0 - , "auditfail":0} - -# if not issubclass(cm.__class__, ClusterManager): -# raise ValueError("Must be a ClusterManager object") - self.CM = cm - self.Env = EnvFactory().getInstance() - self.rsh = RemoteFactory().getInstance() - self.logger = LogFactory() - self.templates = PatternSelector(cm["Name"]) - self.Audits = [] - self.timeout = 120 - self.passed = 1 - self.is_loop = 0 - self.is_unsafe = 0 - self.is_experimental = 0 - self.is_container = 0 - self.is_valgrind = 0 - self.benchmark = 0 # which tests to benchmark - self.timer = {} # timers - - def log(self, args): - self.logger.log(args) - - def debug(self, args): - self.logger.debug(args) - - def has_key(self, key): - return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value - - def __getitem__(self, key): - if str(key) == "0": - raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") - - if key in self.Stats: - return self.Stats[key] - return None - - def log_mark(self, msg): - self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) - return - - def get_timer(self,key = "test"): - try: return self.timer[key] - except: return 0 - - def set_timer(self,key = "test"): - self.timer[key] = time.time() - return self.timer[key] - - def log_timer(self,key = "test"): - elapsed = 0 - if key in self.timer: - elapsed = time.time() - self.timer[key] - s = key == "test" and self.name or "%s:%s" % (self.name,key) - self.debug("%s runtime: %.2f" % (s, elapsed)) - del self.timer[key] - return elapsed - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' - if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - - # Reset the test passed boolean - if name == "calls": - self.passed = 1 - - def failure(self, reason="none"): - '''Increment the failure count''' - self.passed = 0 - self.incr("failure") - self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) - return None - - def success(self): - '''Increment the success count''' - self.incr("success") - return 1 - - def skipped(self): - '''Increment the skipped count''' - self.incr("skipped") - return 1 - - def __call__(self, node): - '''Perform the given test''' - raise ValueError("Abstract Class member (__call__)") - self.incr("calls") - return self.failure() - - def audit(self): - passed = 1 - if len(self.Audits) > 0: - for audit in self.Audits: - if not audit(): - self.logger.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name)) - self.incr("auditfail") - passed = 0 - return passed - - def setup(self, node): - '''Setup the given test''' - return self.success() - - def teardown(self, node): - '''Tear down the given test''' - return self.success() - - def create_watch(self, patterns, timeout, name=None): - if not name: - name = self.name - return LogWatcher(self.Env["LogFileName"], patterns, self.Env["nodes"], self.Env["LogWatcher"], name, timeout) - - def local_badnews(self, prefix, watch, local_ignore=[]): - errcount = 0 - if not prefix: - prefix = "LocalBadNews:" - - ignorelist = [] - ignorelist.append(" CTS: ") - ignorelist.append(prefix) - ignorelist.extend(local_ignore) - - while errcount < 100: - match = watch.look(0) - if match: - add_err = 1 - for ignore in ignorelist: - if add_err == 1 and re.search(ignore, match): - add_err = 0 - if add_err == 1: - self.logger.log(prefix + " " + match) - errcount = errcount + 1 - else: - break - else: - self.logger.log("Too many errors!") - - watch.end() - return errcount - - def is_applicable(self): - return self.is_applicable_common() - - def is_applicable_common(self): - '''Return True if we are applicable in the current test configuration''' - #raise ValueError("Abstract Class member (is_applicable)") - - if self.is_loop and not self.Env["loop-tests"]: - return False - elif self.is_unsafe and not self.Env["unsafe-tests"]: - return False - elif self.is_valgrind and not self.Env["valgrind-tests"]: - return False - elif self.is_experimental and not self.Env["experimental-tests"]: - return False - elif self.is_container and not self.Env["container-tests"]: - return False - elif self.Env["benchmark"] and self.benchmark == 0: - return False - - return True - - def find_ocfs2_resources(self, node): - self.r_o2cb = None - self.r_ocfs2 = [] - - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) - for line in lines: - if re.search("^Resource", line): - r = AuditResource(self.CM, line) - if r.rtype == "o2cb" and r.parent != "NA": - self.debug("Found o2cb: %s" % self.r_o2cb) - self.r_o2cb = r.parent - if re.search("^Constraint", line): - c = AuditConstraint(self.CM, line) - if c.type == "rsc_colocation" and c.target == self.r_o2cb: - self.r_ocfs2.append(c.rsc) - - self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) - return len(self.r_ocfs2) - - def canrunnow(self, node): - '''Return TRUE if we can meaningfully run right now''' - return 1 - - def errorstoignore(self): - '''Return list of errors which are 'normal' and should be ignored''' - return [] - - -class StopTest(CTSTest): - '''Stop (deactivate) the cluster manager on a node''' - def __init__(self, cm): - CTSTest.__init__(self, cm) - self.name = "Stop" - - def __call__(self, node): - '''Perform the 'stop' test. 
''' - self.incr("calls") - if self.CM.ShouldBeStatus[node] != "up": - return self.skipped() - - patterns = [] - # Technically we should always be able to notice ourselves stopping - patterns.append(self.templates["Pat:We_stopped"] % node) - - # Any active node needs to notice this one left - # (note that this won't work if we have multiple partitions) - for other in self.Env["nodes"]: - if self.CM.ShouldBeStatus[other] == "up" and other != node: - patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) - #self.debug("Checking %s will notice %s left"%(other, node)) - - watch = self.create_watch(patterns, self.Env["DeadTime"]) - watch.set_watch() - - if node == self.CM.OurNode: - self.incr("us") - else: - if self.CM.upcount() <= 1: - self.incr("all") - else: - self.incr("them") - - self.CM.StopaCM(node) - watch_result = watch.look_for_all() - - failreason = None - UnmatchedList = "||" - if watch.unmatched: - (_, output) = self.rsh(node, "/bin/ps axf", verbose=1) - for line in output: - self.debug(line) - - (_, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1) - for line in output: - self.debug(line) - - for regex in watch.unmatched: - self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex)) - UnmatchedList += regex + "||"; - failreason = "Missing shutdown pattern" - - self.CM.cluster_stable(self.Env["DeadTime"]) - - if not watch.unmatched or self.CM.upcount() == 0: - return self.success() - - if len(watch.unmatched) >= self.CM.upcount(): - return self.failure("no match against (%s)" % UnmatchedList) - - if failreason == None: - return self.success() - else: - return self.failure(failreason) -# -# We don't register StopTest because it's better when called by -# another test... -# - - -class StartTest(CTSTest): - '''Start (activate) the cluster manager on a node''' - def __init__(self, cm, debug=None): - CTSTest.__init__(self,cm) - self.name = "start" - self.debug = debug - - def __call__(self, node): - '''Perform the 'start' test. ''' - self.incr("calls") - - if self.CM.upcount() == 0: - self.incr("us") - else: - self.incr("them") - - if self.CM.ShouldBeStatus[node] != "down": - return self.skipped() - elif self.CM.StartaCM(node): - return self.success() - else: - return self.failure("Startup %s on node %s failed" - % (self.Env["Name"], node)) - -# -# We don't register StartTest because it's better when called by -# another test... -# - - class FlipTest(CTSTest): '''If it's running, stop it. If it's stopped start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Flip" - self.start = StartTest(cm) - self.stop = StopTest(cm) + self._start = StartTest(cm) + self._stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") - if self.CM.ShouldBeStatus[node] == "up": + if self._cm.ShouldBeStatus[node] == "up": self.incr("stopped") - ret = self.stop(node) + ret = self._stop(node) type = "up->down" # Give the cluster time to recognize it's gone... 
- time.sleep(self.Env["StableTime"]) - elif self.CM.ShouldBeStatus[node] == "down": + time.sleep(self._env["StableTime"]) + elif self._cm.ShouldBeStatus[node] == "down": self.incr("started") - ret = self.start(node) + ret = self._start(node) type = "down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) class RestartTest(CTSTest): '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Restart" - self.start = StartTest(cm) - self.stop = StopTest(cm) - self.benchmark = 1 + self._start = StartTest(cm) + self._stop = StopTest(cm) + self.benchmark = True def __call__(self, node): '''Perform the 'restart' test. ''' self.incr("calls") self.incr("node:" + node) ret1 = 1 - if self.CM.StataCM(node): + if self._cm.StataCM(node): self.incr("WasStopped") - if not self.start(node): + if not self._start(node): return self.failure("start (setup) failure: "+node) self.set_timer() - if not self.stop(node): + if not self._stop(node): return self.failure("stop failure: "+node) - if not self.start(node): + if not self._start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) class StonithdTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stonithd" - self.startall = SimulStartLite(cm) - self.benchmark = 1 + self._startall = SimulStartLite(cm) + self.benchmark = True def __call__(self, node): self.incr("calls") - if len(self.Env["nodes"]) < 2: + if len(self._env["nodes"]) < 2: return self.skipped() - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") - is_dc = self.CM.is_node_dc(node) + is_dc = self._cm.is_node_dc(node) watchpats = [] watchpats.append(self.templates["Pat:Fencing_ok"] % node) watchpats.append(self.templates["Pat:NodeFenced"] % node) - if not self.Env["at-boot"]: + if not self._env["at-boot"]: self.debug("Expecting %s to stay down" % node) - self.CM.ShouldBeStatus[node] = "down" + self._cm.ShouldBeStatus[node] = "down" else: - self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) + self.debug("Expecting %s to come up again %d" % (node, self._env["at-boot"])) watchpats.append("%s.* S_STARTING -> S_PENDING" % node) watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) - watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) + watch = self.create_watch(watchpats, 30 + self._env["DeadTime"] + self._env["StableTime"] + self._env["StartTime"]) watch.set_watch() - origin = self.Env.random_gen.choice(self.Env["nodes"]) + origin = self._env.random_gen.choice(self._env["nodes"]) - (rc, _) = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) + (rc, _) = self._rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) if rc == 124: # CRM_EX_TIMEOUT # Look for the patterns, usually this means the required # device was running on the node to be fenced - or that # the required devices were in the process of being loaded # and/or moved # # Effectively the node committed suicide so there will be # no confirmation, but pacemaker should be watching and # fence the node again - self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) + self._logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) elif origin != node and rc != 
0: self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() self.debug("Waiting for fenced node to come back up") - self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600) + self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600) - self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) + self._logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) elif origin == node and rc != 255: # 255 == broken pipe, ie. the node was fenced as expected - self.logger.log("Locally originated fencing returned %d" % rc) + self._logger.log("Locally originated fencing returned %d" % rc) + + with Timer(self._logger, self.name, "fence"): + matched = watch.look_for_all() - self.set_timer("fence") - matched = watch.look_for_all() - self.log_timer("fence") self.set_timer("reform") if watch.unmatched: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) + self._logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() self.debug("Waiting for fenced node to come back up") - self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600) + self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") - is_stable = self.CM.cluster_stable(self.Env["StartTime"]) + is_stable = self._cm.cluster_stable(self._env["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() - def errorstoignore(self): - return [ - self.templates["Pat:Fencing_start"] % ".*", - self.templates["Pat:Fencing_ok"] % ".*", - self.templates["Pat:Fencing_active"], - r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ self.templates["Pat:Fencing_start"] % ".*", + self.templates["Pat:Fencing_ok"] % ".*", + self.templates["Pat:Fencing_active"], + r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired" ] def is_applicable(self): - if not self.is_applicable_common(): + if not CTSTest.is_applicable(self): return False - if "DoFencing" in list(self.Env.keys()): - return self.Env["DoFencing"] + if "DoFencing" in list(self._env.keys()): + return self._env["DoFencing"] return True AllTestClasses.append(StonithdTest) class StartOnebyOne(CTSTest): '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StartOnebyOne" self.stopall = SimulStopLite(cm) - self.start = StartTest(cm) + self._start = StartTest(cm) self.ns = NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... 
ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed = [] self.set_timer() - for node in self.Env["nodes"]: - if not self.start(node): + for node in self._env["nodes"]: + if not self._start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) class SimulStart(CTSTest): '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStart" self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") - if not self.startall(None): + if not self._startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) class SimulStop(CTSTest): '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStop" - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) class StopOnebyOne(CTSTest): '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StopOnebyOne" - self.startall = SimulStartLite(cm) - self.stop = StopTest(cm) + self._startall = SimulStartLite(cm) + self._stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") failed = [] self.set_timer() - for node in self.Env["nodes"]: - if not self.stop(node): + for node in self._env["nodes"]: + if not self._stop(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to stop: " + repr(failed)) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(StopOnebyOne) class RestartOnebyOne(CTSTest): '''Restart all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RestartOnebyOne" - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'RestartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... 
-        ret = self.startall(None)
+        ret = self._startall(None)
        if not ret:
            return self.failure("Setup failed")

        did_fail = []
        self.set_timer()
-        self.restart = RestartTest(self.CM)
-        for node in self.Env["nodes"]:
+        self.restart = RestartTest(self._cm)
+        for node in self._env["nodes"]:
            if not self.restart(node):
                did_fail.append(node)

        if did_fail:
            return self.failure("Could not restart %d nodes: %s" % (len(did_fail), repr(did_fail)))

        return self.success()

# Register RestartOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)


class PartialStart(CTSTest):
    '''Start a node - but tell it to stop before it finishes starting up'''

    def __init__(self, cm):
        CTSTest.__init__(self,cm)

        self.name = "PartialStart"
-        self.startall = SimulStartLite(cm)
+        self._startall = SimulStartLite(cm)
        self.stopall = SimulStopLite(cm)
-        self.stop = StopTest(cm)
-        #self.is_unsafe = 1
+        self._stop = StopTest(cm)

    def __call__(self, node):
        '''Perform the 'PartialStart' test. '''
        self.incr("calls")

        ret = self.stopall(None)
        if not ret:
            return self.failure("Setup failed")

        watchpats = []
        watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure")
-        watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
+        watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
        watch.set_watch()

-        self.CM.StartaCMnoBlock(node)
+        self._cm.StartaCMnoBlock(node)
        ret = watch.look_for_all()
        if not ret:
-            self.logger.log("Patterns not found: " + repr(watch.unmatched))
+            self._logger.log("Patterns not found: " + repr(watch.unmatched))
            return self.failure("Setup of %s failed" % node)

-        ret = self.stop(node)
+        ret = self._stop(node)
        if not ret:
            return self.failure("%s did not stop in time" % node)

        return self.success()

-    def errorstoignore(self):
-        '''Return list of errors which should be ignored'''
+    @property
+    def errors_to_ignore(self):
+        """ Return list of errors which should be ignored """

        # We might do some fencing in the 2-node case if we make it up far enough
-        return [
-            r"Executing reboot fencing operation",
-            r"Requesting fencing \([^)]+\) targeting node ",
-        ]
+        return [ r"Executing reboot fencing operation",
+                 r"Requesting fencing \([^)]+\) targeting node " ]

# Register PartialStart as a good test to run
AllTestClasses.append(PartialStart)


class StandbyTest(CTSTest):
    def __init__(self, cm):
        CTSTest.__init__(self,cm)

        self.name = "Standby"
-        self.benchmark = 1
+        self.benchmark = True

-        self.start = StartTest(cm)
-        self.startall = SimulStartLite(cm)
+        self._start = StartTest(cm)
+        self._startall = SimulStartLite(cm)

    # make sure the node is active
    # set the node to standby mode
    # check resources, no resource should be running on the node
    # set the node to active mode
    # check resources, resources should have been migrated back (SHOULD THEY?)
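    # Illustrative outline (comments only, "cm" being the cluster manager
    # handle used by this test) of the sequence __call__() implements below:
    #
    #   if cm.StandbyStatus(node) != "off":
    #       cm.SetStandbyMode(node, "off")     # make sure the node is active
    #   cm.SetStandbyMode(node, "on")          # nothing may keep running on it
    #   assert cm.active_resources(node) == []
    #   cm.SetStandbyMode(node, "off")         # resources may migrate back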
def __call__(self, node): self.incr("calls") - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Start all nodes failed") self.debug("Make sure node %s is active" % node) - if self.CM.StandbyStatus(node) != "off": - if not self.CM.SetStandbyMode(node, "off"): + if self._cm.StandbyStatus(node) != "off": + if not self._cm.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) - self.CM.cluster_stable() + self._cm.cluster_stable() - status = self.CM.StandbyStatus(node) + status = self._cm.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.debug("Getting resources running on node %s" % node) - rsc_on_node = self.CM.active_resources(node) + rsc_on_node = self._cm.active_resources(node) watchpats = [] watchpats.append(r"State transition .* -> S_POLICY_ENGINE") - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) + watch = self.create_watch(watchpats, self._env["DeadTime"]+10) watch.set_watch() self.debug("Setting node %s to standby mode" % node) - if not self.CM.SetStandbyMode(node, "on"): + if not self._cm.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") ret = watch.look_for_all() if not ret: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) - self.CM.SetStandbyMode(node, "off") + self._logger.log("Patterns not found: " + repr(watch.unmatched)) + self._cm.SetStandbyMode(node, "off") return self.failure("cluster didn't react to standby change on %s" % node) - self.CM.cluster_stable() + self._cm.cluster_stable() - status = self.CM.StandbyStatus(node) + status = self._cm.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.debug("Checking resources") - bad_run = self.CM.active_resources(node) + bad_run = self._cm.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.debug("Setting node %s to active mode" % node) - self.CM.SetStandbyMode(node, "off") + self._cm.SetStandbyMode(node, "off") return rc self.debug("Setting node %s to active mode" % node) - if not self.CM.SetStandbyMode(node, "off"): + if not self._cm.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") - self.CM.cluster_stable() + self._cm.cluster_stable() - status = self.CM.StandbyStatus(node) + status = self._cm.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) class ValgrindTest(CTSTest): '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Valgrind" self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) - self.is_valgrind = 1 - self.is_loop = 1 + self._startall = SimulStartLite(cm) + self.is_valgrind = True + self.is_loop = True def setup(self, node): self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind, # and clear any valgrind logs from previous runs. For now, we rely on # the user to do this manually. 
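        # A manual setup along the lines of the @TODO above (option names as
        # found in the stock /etc/sysconfig/pacemaker; verify against the
        # installed version) would be, on each node:
        #
        #   PCMK_valgrind_enabled=yes
        #   VALGRIND_OPTS="--leak-check=full --log-file=/var/lib/pacemaker/valgrind-%p"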
- ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Start all nodes failed") return self.success() def teardown(self, node): # Return all nodes to normal # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks # (no longer used but kept in case feature is restored) leaked = [] - self.stop = StopTest(self.CM) + self._stop = StopTest(self._cm) - for node in self.Env["nodes"]: - rc = self.stop(node) + for node in self._env["nodes"]: + rc = self._stop(node) if not rc: self.failure("Couldn't shut down %s" % node) - (rc, _) = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat) + (rc, _) = self._rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self._logger.logPat) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) - (_, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, verbose=1) + (_, output) = self._rsh(node, "grep -e lost: -e SUMMARY: %s" % self._logger.logPat, verbose=1) for line in output: - self.logger.log(line) - (_, output) = self.rsh(node, "cat %s" % self.logger.logPat, verbose=1) + self._logger.log(line) + (_, output) = self._rsh(node, "cat %s" % self._logger.logPat, verbose=1) for line in output: self.debug(line) - self.rsh(node, "rm -f %s" % self.logger.logPat, verbose=1) + self._rsh(node, "rm -f %s" % self._logger.logPat, verbose=1) return leaked def __call__(self, node): #leaked = self.find_leaks() #if len(leaked) > 0: # return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", - r"pacemaker-based.*: .* avoid confusing Valgrind", - r"HA_VALGRIND_ENABLED", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", + r"pacemaker-based.*: .* avoid confusing Valgrind", + r"HA_VALGRIND_ENABLED" ] class StandbyLoopTest(ValgrindTest): '''Check for memory leaks by putting a node in and out of standby for an hour''' # @TODO This is not a useful test for memory leaks def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name = "StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 - done = time.time() + self.Env["loop-minutes"] * 60 + done = time.time() + self._env["loop-minutes"] * 60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) - if not self.CM.SetStandbyMode(node, "on"): + if not self._cm.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) - if not self.CM.SetStandbyMode(node, "off"): + if not self._cm.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() #AllTestClasses.append(StandbyLoopTest) class BandwidthTest(CTSTest): # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the 
generic cluster manager API. '''Test the bandwidth which the cluster uses''' def __init__(self, cm): CTSTest.__init__(self, cm) + + self.stats["min"] = 0 + self.stats["max"] = 0 + self.stats["totalbandwidth"] = 0 + self.name = "Bandwidth" - self.start = StartTest(cm) - self.__setitem__("min",0) - self.__setitem__("max",0) - self.__setitem__("totalbandwidth",0) + self._start = StartTest(cm) (handle, self.tempfile) = tempfile.mkstemp(".cts") os.close(handle) - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") - if self.CM.upcount() < 1: + if self._cm.upcount() < 1: return self.skipped() - Path = self.CM.InternalCommConfig() + Path = self._cm.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) - (rc, _) = self.rsh(node, dumpcmd) + (rc, _) = self._rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) - self.rsh.copy(farfile, self.tempfile) + self._rsh.copy(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: - self.logger.log("Could not compute bandwidth.") + self._logger.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) - self.logger.log("...bandwidth: %d bits/sec" % intband) - self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth - if self.Stats["min"] == 0: - self.Stats["min"] = Bandwidth - if Bandwidth > self.Stats["max"]: - self.Stats["max"] = Bandwidth - if Bandwidth < self.Stats["min"]: - self.Stats["min"] = Bandwidth - self.rsh(node, "rm -f %s" % fstmpfile) + self._logger.log("...bandwidth: %d bits/sec" % intband) + + self.stats["totalbandwidth"] += Bandwidth + + if self.stats["min"] == 0: + self.stats["min"] = Bandwidth + + if Bandwidth > self.stats["max"]: + self.stats["max"] = Bandwidth + + if Bandwidth < self.stats["min"]: + self.stats["min"] = Bandwidth + + self._rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" 
% rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count + 1 linesplit = line.split(" ") for j in range(len(linesplit)-1): if linesplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: - self.logger.log("Invalid tcpdump line: %s" % line) + self._logger.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = T1.split(":") time2split = timesplit[2].split(".") time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = line.split(" ") for j in range(len(linessplit)-1): if linessplit[j] == "udp": break if linessplit[j] == "length:": break try: sum = int(linessplit[j+1]) + sum except ValueError: - self.logger.log("Invalid tcpdump line: %s" % line) + self._logger.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = T2.split(":") time2split = timesplit[2].split(".") time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return int((sum*8)/time) def is_applicable(self): '''BandwidthTest never applicable''' return False AllTestClasses.append(BandwidthTest) ################################################################### class MaintenanceMode(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "MaintenanceMode" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) + self._start = StartTest(cm) + self._startall = SimulStartLite(cm) self.max = 30 - #self.is_unsafe = 1 - self.benchmark = 1 + self.benchmark = True self.action = "asyncmon" self.interval = 0 self.rid = "maintenanceDummy" def toggleMaintenanceMode(self, node, action): pats = [] pats.append(self.templates["Pat:DC_IDLE"]) # fail the resource right after turning Maintenance mode on # verify it is not recovered until maintenance mode is turned off if action == "On": pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid)) else: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) watch = self.create_watch(pats, 60) watch.set_watch() self.debug("Turning maintenance mode %s" % action) - self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) + self._rsh(node, self.templates["MaintenanceMode%s" % (action)]) if (action == "On"): - self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) + self._rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) + + with Timer(self._logger, self.name, "recover%s" % action): + watch.look_for_all() - self.set_timer("recover%s" % (action)) - watch.look_for_all() - self.log_timer("recover%s" % (action)) if watch.unmatched: self.debug("Failed to find patterns when turning maintenance mode %s" % action) return repr(watch.unmatched) return "" def insertMaintenanceDummy(self, node): pats = [] pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid))) watch = self.create_watch(pats, 60) watch.set_watch() - self.CM.AddDummyRsc(node, self.rid) + self._cm.AddDummyRsc(node, self.rid) - self.set_timer("addDummy") - 
watch.look_for_all() - self.log_timer("addDummy") + with Timer(self._logger, self.name, "addDummy"): + watch.look_for_all() if watch.unmatched: self.debug("Failed to find patterns when adding maintenance dummy resource") return repr(watch.unmatched) return "" def removeMaintenanceDummy(self, node): pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) watch = self.create_watch(pats, 60) watch.set_watch() - self.CM.RemoveDummyRsc(node, self.rid) + self._cm.RemoveDummyRsc(node, self.rid) - self.set_timer("removeDummy") - watch.look_for_all() - self.log_timer("removeDummy") + with Timer(self._logger, self.name, "removeDummy"): + watch.look_for_all() if watch.unmatched: self.debug("Failed to find patterns when removing maintenance dummy resource") return repr(watch.unmatched) return "" def managedRscList(self, node): rscList = [] - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) for line in lines: if re.search("^Resource", line): - tmp = AuditResource(self.CM, line) + tmp = AuditResource(self._cm, line) if tmp.managed: rscList.append(tmp.id) return rscList def verifyResources(self, node, rscList, managed): managedList = list(rscList) managed_str = "managed" if not managed: managed_str = "unmanaged" - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) for line in lines: if re.search("^Resource", line): - tmp = AuditResource(self.CM, line) + tmp = AuditResource(self._cm, line) if managed and not tmp.managed: continue elif not managed and tmp.managed: continue elif managedList.count(tmp.id): managedList.remove(tmp.id) if len(managedList) == 0: self.debug("Found all %s resources on %s" % (managed_str, node)) return True - self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) + self._logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) return False def __call__(self, node): '''Perform the 'MaintenanceMode' test. ''' self.incr("calls") verify_managed = False verify_unmanaged = False failPat = "" - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") # get a list of all the managed resources. We use this list # after enabling maintenance mode to verify all managed resources # become un-managed. After maintenance mode is turned off, we use # this list to verify all the resources become managed again. managedResources = self.managedRscList(node) if len(managedResources) == 0: - self.logger.log("No managed resources on %s" % node) + self._logger.log("No managed resources on %s" % node) return self.skipped() # insert a fake resource we can fail during maintenance mode # so we can verify recovery does not take place until after maintenance # mode is disabled. failPat = failPat + self.insertMaintenanceDummy(node) # toggle maintenance mode ON, then fail dummy resource. failPat = failPat + self.toggleMaintenanceMode(node, "On") # verify all the resources are now unmanaged if self.verifyResources(node, managedResources, False): verify_unmanaged = True # Toggle maintenance mode OFF, verify dummy is recovered. failPat = failPat + self.toggleMaintenanceMode(node, "Off") # verify all the resources are now managed again if self.verifyResources(node, managedResources, True): verify_managed = True # Remove our maintenance dummy resource. 
failPat = failPat + self.removeMaintenanceDummy(node) - self.CM.cluster_stable() + self._cm.cluster_stable() if failPat != "": return self.failure("Unmatched patterns: %s" % (failPat)) elif verify_unmanaged is False: return self.failure("Failed to verify resources became unmanaged during maintenance mode") elif verify_managed is False: return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") return self.success() - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"Updating failcount for %s" % self.rid, - r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self.rid, - r"Unknown operation: fail", - self.templates["Pat:RscOpOK"] % (self.action, self.rid), - r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"Updating failcount for %s" % self.rid, + r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self.rid, + r"Unknown operation: fail", + self.templates["Pat:RscOpOK"] % (self.action, self.rid), + r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval) ] AllTestClasses.append(MaintenanceMode) class ResourceRecover(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ResourceRecover" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) + self._start = StartTest(cm) + self._startall = SimulStartLite(cm) self.max = 30 self.rid = None self.rid_alt = None - #self.is_unsafe = 1 - self.benchmark = 1 + self.benchmark = True # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. ''' self.incr("calls") - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") # List all resources active on the node (skip test if none) - resourcelist = self.CM.active_resources(node) + resourcelist = self._cm.active_resources(node) if len(resourcelist) == 0: - self.logger.log("No active resources on %s" % node) + self._logger.log("No active resources on %s" % node) return self.skipped() # Choose one resource at random rsc = self.choose_resource(node, resourcelist) if rsc is None: return self.failure("Could not get details of resource '%s'" % self.rid) if rsc.id == rsc.clone_id: self.debug("Failing " + rsc.id) else: self.debug("Failing " + rsc.id + " (also known as " + rsc.clone_id + ")") # Log patterns to watch for (failure, plus restart if managed) pats = [] pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id)) if rsc.managed: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) if rsc.unique: pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) else: # Anonymous clones may get restarted with a different clone number pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) # Fail resource. (Ideally, we'd fail it twice, to ensure the fail count # is incrementing properly, but it might restart on a different node. # We'd have to temporarily ban it from all other nodes and ensure the # migration-threshold hasn't been reached.) 
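The fail count mentioned in this comment is tracked per resource, operation, and interval, and get_failcount() below reads it back with crm_failcount. As a standalone sketch of that check (plain ssh stands in for the test's self._rsh transport; node and resource names are placeholders):

import subprocess

def query_failcount(node, rsc, op, interval):
    # Same crm_failcount invocation the test uses, run over ssh
    cmd = ("crm_failcount --quiet --query --resource %s "
           "--operation %s --interval %d --node %s" % (rsc, op, interval, node))
    result = subprocess.run(["ssh", node, cmd],
                            capture_output=True, text=True, check=False)
    try:
        return int(result.stdout.strip())
    except ValueError:
        return -1  # unparseable output, mirroring the test's error path

A successful ResourceRecover run expects the count reported after the crm_resource -V -F injection to be exactly one higher than before it.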
if self.fail_resource(rsc, node, pats) is None: return None # self.failure() already called return self.success() def choose_resource(self, node, resourcelist): """ Choose a random resource to target """ - self.rid = self.Env.random_gen.choice(resourcelist) + self.rid = self._env.random_gen.choice(resourcelist) self.rid_alt = self.rid - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) for line in lines: if line.startswith("Resource: "): - rsc = AuditResource(self.CM, line) + rsc = AuditResource(self._cm, line) if rsc.id == self.rid: # Handle anonymous clones that get renamed self.rid = rsc.clone_id return rsc return None def get_failcount(self, node): """ Check the fail count of targeted resource on given node """ - (rc, lines) = self.rsh(node, + (rc, lines) = self._rsh(node, "crm_failcount --quiet --query --resource %s " "--operation %s --interval %d " "--node %s" % (self.rid, self.action, self.interval, node), verbose=1) if rc != 0 or len(lines) != 1: - self.logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, + self._logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, " // ".join(map(str.strip, lines)))) return -1 try: failcount = int(lines[0]) except (IndexError, ValueError): - self.logger.log("crm_failcount output on %s unparseable: %s" % (node, + self._logger.log("crm_failcount output on %s unparseable: %s" % (node, ' '.join(lines))) return -1 return failcount def fail_resource(self, rsc, node, pats): """ Fail the targeted resource, and verify as expected """ orig_failcount = self.get_failcount(node) watch = self.create_watch(pats, 60) watch.set_watch() - self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) + self._rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) - self.set_timer("recover") - watch.look_for_all() - self.log_timer("recover") + with Timer(self._logger, self.name, "recover"): + watch.look_for_all() - self.CM.cluster_stable() - recovered = self.CM.ResourceLocation(self.rid) + self._cm.cluster_stable() + recovered = self._cm.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.debug("%s is running on: %s" % (self.rid, repr(recovered))) elif rsc.managed: return self.failure("%s was not recovered and is inactive" % self.rid) new_failcount = self.get_failcount(node) if new_failcount != (orig_failcount + 1): return self.failure("%s fail count is %d not %d" % (self.rid, new_failcount, orig_failcount + 1)) return 0 # Anything but None is success - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"Updating failcount for %s" % self.rid, - r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self.rid, self.rid_alt), - r"Unknown operation: fail", - self.templates["Pat:RscOpOK"] % (self.action, self.rid), - r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"Updating failcount for %s" % self.rid, + r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self.rid, self.rid_alt), + r"Unknown operation: fail", + self.templates["Pat:RscOpOK"] % (self.action, self.rid), + r"(ERROR|error).*: Action %s_%s_%d .* initiated 
outside of a transition" % (self.rid, self.action, self.interval) ] AllTestClasses.append(ResourceRecover) class ComponentFail(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ComponentFail" - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] - self.is_unsafe = 1 + self.is_unsafe = True def __call__(self, node): '''Perform the 'ComponentFail' test. ''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") - if not self.CM.cluster_stable(self.Env["StableTime"]): + if not self._cm.cluster_stable(self._env["StableTime"]): return self.failure("Setup failed - unstable") - node_is_dc = self.CM.is_node_dc(node, None) + node_is_dc = self._cm.is_node_dc(node, None) # select a component to kill - chosen = self.Env.random_gen.choice(self.complist) + chosen = self._env.random_gen.choice(self.complist) while chosen.dc_only and node_is_dc == 0: - chosen = self.Env.random_gen.choice(self.complist) + chosen = self._env.random_gen.choice(self.complist) self.debug("...component %s (dc=%d)" % (chosen.name, node_is_dc)) self.incr(chosen.name) if chosen.name != "corosync": self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # @TODO this should be a flag in the Component if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]: # Ignore actions for fence devices if fencer will respawn # (their registration will be lost, and probes will fail) self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ] - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) for line in lines: if re.search("^Resource", line): - r = AuditResource(self.CM, line) + r = AuditResource(self._cm, line) if r.rclass == "stonith": self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id) # supply a copy so self.patterns doesn't end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append(self.templates["Pat:Fencing_ok"] % node) stonith = self.create_watch(stonithPats, 0) stonith.set_watch() # set the watch for stable watch = self.create_watch( - tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) + tmpPats, self._env["DeadTime"] + self._env["StableTime"] + self._env["StartTime"]) watch.set_watch() # kill the component chosen.kill(node) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() self.debug("Waiting for any fenced node to come back up") - self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600) + self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") - self.CM.cluster_stable(self.Env["StartTime"]) + self._cm.cluster_stable(self._env["StartTime"]) self.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.debug("Found: " + repr(shot)) self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) - 
if not self.Env["at-boot"]: - self.CM.ShouldBeStatus[node] = "down" + if not self._env["at-boot"]: + self._cm.ShouldBeStatus[node] = "down" # If fencing occurred, chances are many (if not all) the expected logs # will not be sent - or will be lost when the node reboots return self.success() # check for logs indicating a graceful recovery matched = watch.look_for_all(allow_multiple_matches=True) if watch.unmatched: - self.logger.log("Patterns not found: " + repr(watch.unmatched)) + self._logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to re-stabilize with all nodes") - is_stable = self.CM.cluster_stable(self.Env["StartTime"]) + is_stable = self._cm.cluster_stable(self._env["StartTime"]) if not matched: return self.failure("Didn't find all expected %s patterns" % chosen.name) elif not is_stable: return self.failure("Cluster did not become stable after killing %s" % chosen.name) return self.success() - def errorstoignore(self): - '''Return list of errors which should be ignored''' - # Note that okerrpatterns refers to the last time we ran this test - # The good news is that this works fine for us... + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + # Note that okerrpatterns refers to the last time we ran this test + # The good news is that this works fine for us... self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) class SplitBrainTest(CTSTest): '''Test split-brain: when the path between two nodes breaks, check whether both nodes take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - self.is_experimental = 1 + self._start = StartTest(cm) + self._startall = SimulStartLite(cm) + self.is_experimental = True def isolate_partition(self, partition): other_nodes = [] - other_nodes.extend(self.Env["nodes"]) + other_nodes.extend(self._env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: - self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition)) + self._logger.log("Node "+node+" not in " + repr(self._env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.debug("Creating partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: - if not self.CM.isolate_node(node, other_nodes): - self.logger.log("Could not isolate %s" % node) + if not self._cm.isolate_node(node, other_nodes): + self._logger.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] - other_nodes.extend(self.Env["nodes"]) + other_nodes.extend(self._env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: - self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"])) + self._logger.log("Node "+node+" not in " + repr(self._env["nodes"])) if len(other_nodes) == 0: return 1 self.debug("Healing partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: - self.CM.unisolate_node(node, other_nodes) + self._cm.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") - self.passed = 1 + self.passed = True partitions = {} - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we
get multiple partitions partitions = {} - p_max = len(self.Env["nodes"]) - for node in self.Env["nodes"]: - p = self.Env.random_gen.randint(1, p_max) + p_max = len(self._env["nodes"]) + for node in self._env["nodes"]: + p = self._env.random_gen.randint(1, p_max) if not p in partitions: partitions[p] = [] partitions[p].append(node) p_max = len(list(partitions.keys())) if p_max > 1: break # else, try again self.debug("Created %d partitions" % p_max) for key in list(partitions.keys()): self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now - self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") + self._rsh(node, "crm_attribute -V -n stonith-enabled -v false") for key in list(partitions.keys()): self.isolate_partition(partitions[key]) count = 30 while count > 0: - if len(self.CM.find_partitions()) != p_max: + if len(self._cm.find_partitions()) != p_max: time.sleep(10) count -= 1 else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability - if not self.CM.cluster_stable(): + if not self._cm.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state - self.CM.partitions_expected = p_max + self._cm.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") - self.CM.partitions_expected = 1 + self._cm.partitions_expected = 1 # And heal them again for key in list(partitions.keys()): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: - if len(self.CM.find_partitions()) != 1: + if len(self._cm.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] - partitions = self.CM.find_partitions() + partitions = self._cm.find_partitions() if len(partitions) > 0: members = partitions[0].split() - if len(members) != len(self.Env["nodes"]): + if len(members) != len(self._env["nodes"]): time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is preferable to # trying to continue in a messed-up state - if not self.CM.cluster_stable(1200): + if not self._cm.cluster_stable(1200): self.failure("Reformed cluster not stable") - if self.Env["continue"]: + if self._env["continue"]: answer = "Y" else: try: answer = input('Continue?
[nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on - if self.Env["DoFencing"]: - self.rsh(node, "crm_attribute -V -D -n stonith-enabled") + if self._env["DoFencing"]: + self._rsh(node, "crm_attribute -V -D -n stonith-enabled") - self.CM.cluster_stable() + self._cm.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") - def errorstoignore(self): - '''Return list of errors which are 'normal' and should be ignored''' - return [ - r"Another DC detected:", - r"(ERROR|error).*: .*Application of an update diff failed", - r"pacemaker-controld.*:.*not in our membership list", - r"CRIT:.*node.*returning after partition", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"Another DC detected:", + r"(ERROR|error).*: .*Application of an update diff failed", + r"pacemaker-controld.*:.*not in our membership list", + r"CRIT:.*node.*returning after partition" ] def is_applicable(self): - if not self.is_applicable_common(): + if not CTSTest.is_applicable(self): return False - return len(self.Env["nodes"]) > 2 + return len(self._env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) class Reattach(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Reattach" - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) - self.is_unsafe = 0 # Handled by canrunnow() + self.is_unsafe = False def _is_managed(self, node): - (_, is_managed) = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1) + (_, is_managed) = self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1) is_managed = is_managed[0].strip() return is_managed == "true" def _set_unmanaged(self, node): self.debug("Disable resource management") - self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") + self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") def _set_managed(self, node): self.debug("Re-enable resource management") - self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") + self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") def setup(self, node): attempt = 0 - if not self.startall(None): + if not self._startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started - while not self.CM.cluster_stable(double_check=True): + while not self._cm.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.debug("Not stable yet, re-testing") else: - self.logger.log("Cluster is not stable") + self._logger.log("Cluster is not stable") return None return 1 def teardown(self, node): # Make sure 'node' is up - start = StartTest(self.CM) + start = StartTest(self._cm) start(node) if not self._is_managed(node): - self.logger.log("Attempting to re-enable resource management on %s" % node) + self._logger.log("Attempting to re-enable resource management on %s" % node) self._set_managed(node) - self.CM.cluster_stable() + self._cm.cluster_stable() if not self._is_managed(node): - self.logger.log("Could not re-enable resource management") + self._logger.log("Could not re-enable resource management") return 0 return 1 - def canrunnow(self, node): - '''Return TRUE if we can meaningfully run right now''' - if 
self.find_ocfs2_resources(node): - self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") - return 0 - return 1 + def can_run_now(self, node): + """ Return True if we can meaningfully run right now""" + if self._find_ocfs2_resources(node): + self._logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") + return False + + return True def __call__(self, node): self.incr("calls") pats = [] # Conveniently, the scheduler will display this message when disabling # management, even if fencing is not enabled, so we can rely on it. managed = self.create_watch(["No fencing will be done"], 60) managed.set_watch() self._set_unmanaged(node) if not managed.look_for_all(): - self.logger.log("Patterns not found: " + repr(managed.unmatched)) + self._logger.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*")) watch = self.create_watch(pats, 60, "ShutdownActivity") watch.set_watch() self.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self._set_managed(node) return self.failure("Couldn't shut down the cluster") self.debug("Bringing the cluster back up") - ret = self.startall(None) + ret = self._startall(None) time.sleep(5) # allow ping to update the CIB if not ret: self._set_managed(node) return self.failure("Couldn't restart the cluster") if self.local_badnews("ResourceActivity:", watch): self._set_managed(node) return self.failure("Resources stopped or started during cluster restart") watch = self.create_watch(pats, 60, "StartupActivity") watch.set_watch() # Re-enable resource management (and verify it happened). self._set_managed(node) - self.CM.cluster_stable() + self._cm.cluster_stable() if not self._is_managed(node): return self.failure("Could not re-enable resource management") # Ignore actions for STONITH resources ignore = [] - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) for line in lines: if re.search("^Resource", line): - r = AuditResource(self.CM, line) + r = AuditResource(self._cm, line) if r.rclass == "stonith": self.debug("Ignoring start actions for %s" % r.id) ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id)) if self.local_badnews("ResourceActivity:", watch, ignore): return self.failure("Resources stopped or started after resource management was re-enabled") return ret - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"resource( was|s were) active at shutdown", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"resource( was|s were) active at shutdown" ] def is_applicable(self): return True AllTestClasses.append(Reattach) class SpecialTest1(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SpecialTest1" - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. 
''' self.incr("calls") # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Could not stop all nodes") # Test config recovery when the other nodes come up - self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*") + self._rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*") # Start the selected node ret = self.restart1(node) if not ret: return self.failure("Could not start "+node) # Start all remaining nodes - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Could not start the remaining nodes") return self.success() - def errorstoignore(self): - '''Return list of errors which should be ignored''' + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + # Errors that occur as a result of the CIB being wiped - return [ - r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", - r"error.*: Resource start-up disabled since no STONITH resources have been defined", - r"error.*: Either configure some or disable STONITH with the stonith-enabled option", - r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity", - ] + return [ r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", + r"error.*: Resource start-up disabled since no STONITH resources have been defined", + r"error.*: Either configure some or disable STONITH with the stonith-enabled option", + r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity" ] AllTestClasses.append(SpecialTest1) class HAETest(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "HAETest" self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) - self.is_loop = 1 + self._startall = SimulStartLite(cm) + self.is_loop = True def setup(self, node): # Start all remaining nodes - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active = 0 - (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, verbose=1) + (rc, lines) = self._rsh(node, "crm_resource -r %s -W -Q" % resource, verbose=1) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.debug("Resource %s is still inactive" % resource) elif rc == 234: - self.logger.log("Unknown resource %s" % resource) + self._logger.log("Unknown resource %s" % resource) return 0 elif rc == 246: - self.logger.log("Cluster is inactive") + self._logger.log("Cluster is inactive") return 0 elif rc != 0: - self.logger.log("Call to crm_resource failed, rc=%d" % rc) + self._logger.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None - (_, lines) = self.rsh(node, "crm_resource -c", verbose=1) + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) for line in lines: if re.search("^Resource", line): - r = AuditResource(self.CM, line) + r = AuditResource(self._cm, line) if r.rtype == 
"controld" and r.parent != "NA": self.debug("Found dlm: %s" % self.r_dlm) self.r_dlm = r.parent return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None - self.r_o2cb = None - self.r_ocfs2 = [] + self._r_o2cb = None + self._r_ocfs2 = [] if self.find_dlm(node): - self.find_ocfs2_resources(node) + self._find_ocfs2_resources(node) def is_applicable(self): - if not self.is_applicable_common(): + if not CTSTest.is_applicable(self): return False - if self.Env["Schema"] == "hae": + if self._env["Schema"] == "hae": return True return None class HAERoleTest(HAETest): def __init__(self, cm): '''Lars' mount/unmount test for the HA extension. ''' HAETest.__init__(self,cm) self.name = "HAERoleTest" def change_state(self, node, resource, target): - (rc, _) = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) + (rc, _) = self._rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 - done = time.time() + self.Env["loop-minutes"]*60 + done = time.time() + self._env["loop-minutes"]*60 self.find_hae_resources(node) - clone_max = len(self.Env["nodes"]) + clone_max = len(self._env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc - if not self.wait_on_state(node, self.r_o2cb, clone_max): - self.failure("%s did not come up correctly" % self.r_o2cb) + if not self.wait_on_state(node, self._r_o2cb, clone_max): + self.failure("%s did not come up correctly" % self._r_o2cb) failed = lpc - for fs in self.r_ocfs2: + for fs in self._r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) class HAEStandbyTest(HAETest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): HAETest.__init__(self,cm) self.name = "HAEStandbyTest" def change_state(self, node, resource, target): - (rc, _) = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target)) + (rc, _) = self._rsh(node, "crm_standby -V -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 - done = time.time() + self.Env["loop-minutes"]*60 + done = time.time() + self._env["loop-minutes"]*60 self.find_hae_resources(node) - clone_max = len(self.Env["nodes"]) + clone_max = len(self._env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc - if not self.wait_on_state(node, self.r_o2cb, clone_max): - self.failure("%s did not come up correctly" % self.r_o2cb) + if not self.wait_on_state(node, self._r_o2cb, clone_max): + self.failure("%s did not come up correctly" % self._r_o2cb) failed = lpc - for fs in self.r_ocfs2: + for fs in self._r_ocfs2: if 
not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) class NearQuorumPointTest(CTSTest): ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down Use a 50% probability for each of up/down. - figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test. ''' self.incr("calls") startset = [] stopset = [] - stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint") + stonith = self._cm.prepare_fencing_watcher("NearQuorumPoint") #decide what to do with each node - for node in self.Env["nodes"]: - action = self.Env.random_gen.choice(["start","stop"]) - #action = self.Env.random_gen.choice(["start","stop","no change"]) + for node in self._env["nodes"]: + action = self._env.random_gen.choice(["start","stop"]) + #action = self._env.random_gen.choice(["start","stop","no change"]) if action == "start" : startset.append(node) elif action == "stop" : stopset.append(node) self.debug("start nodes:" + repr(startset)) self.debug("stop nodes:" + repr(stopset)) #add search patterns watchpats = [ ] for node in stopset: - if self.CM.ShouldBeStatus[node] == "up": + if self._cm.ShouldBeStatus[node] == "up": watchpats.append(self.templates["Pat:We_stopped"] % node) for node in startset: - if self.CM.ShouldBeStatus[node] == "down": + if self._cm.ShouldBeStatus[node] == "down": #watchpats.append(self.templates["Pat:NonDC_started"] % node) watchpats.append(self.templates["Pat:Local_started"] % node) else: for stopping in stopset: - if self.CM.ShouldBeStatus[stopping] == "up": - watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping))) + if self._cm.ShouldBeStatus[stopping] == "up": + watchpats.append(self.templates["Pat:They_stopped"] % (node, self._cm.key_for_node(stopping))) if len(watchpats) == 0: return self.skipped() if len(startset) != 0: watchpats.append(self.templates["Pat:DC_IDLE"]) - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) + watch = self.create_watch(watchpats, self._env["DeadTime"]+10) watch.set_watch() #begin actions for node in stopset: - if self.CM.ShouldBeStatus[node] == "up": - self.CM.StopaCMnoBlock(node) + if self._cm.ShouldBeStatus[node] == "up": + self._cm.StopaCMnoBlock(node) for node in startset: - if self.CM.ShouldBeStatus[node] == "down": - self.CM.StartaCMnoBlock(node) + if self._cm.ShouldBeStatus[node] == "down": + self._cm.StartaCMnoBlock(node) #get the result if watch.look_for_all(): - self.CM.cluster_stable() - self.CM.fencing_cleanup("NearQuorumPoint", stonith) + self._cm.cluster_stable() + self._cm.fencing_cleanup("NearQuorumPoint", stonith) return self.success() - self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched)) + self._logger.log("Warn: Patterns not found: " + repr(watch.unmatched)) #get the "bad" nodes upnodes = [] for node in stopset: - if self.CM.StataCM(node) == 1: + if self._cm.StataCM(node) == 1: upnodes.append(node) downnodes = [] for node in startset: - if self.CM.StataCM(node) == 0: + if self._cm.StataCM(node) 
== 0: downnodes.append(node) - self.CM.fencing_cleanup("NearQuorumPoint", stonith) + self._cm.fencing_cleanup("NearQuorumPoint", stonith) if upnodes == [] and downnodes == []: - self.CM.cluster_stable() + self._cm.cluster_stable() # Make sure they're completely down with no residue for node in stopset: - self.rsh(node, self.templates["StopCmd"]) + self._rsh(node, self.templates["StopCmd"]) return self.success() if len(upnodes) > 0: - self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes)) + self._logger.log("Warn: Unstoppable nodes: " + repr(upnodes)) if len(downnodes) > 0: - self.logger.log("Warn: Unstartable nodes: " + repr(downnodes)) + self._logger.log("Warn: Unstartable nodes: " + repr(downnodes)) return self.failure() def is_applicable(self): return True AllTestClasses.append(NearQuorumPointTest) class RollingUpgradeTest(CTSTest): '''Perform a rolling upgrade of the cluster''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RollingUpgrade" - self.start = StartTest(cm) - self.stop = StopTest(cm) + self._start = StartTest(cm) + self._stop = StopTest(cm) self.stopall = SimulStopLite(cm) - self.startall = SimulStartLite(cm) + self._startall = SimulStartLite(cm) def setup(self, node): # Stop all nodes, then downgrade and restart them ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") - for node in self.Env["nodes"]: + for node in self._env["nodes"]: if not self.downgrade(node, None): return self.failure("Couldn't downgrade %s" % node) - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") - for node in self.Env["nodes"]: + for node in self._env["nodes"]: if not self.upgrade(node, None): return self.failure("Couldn't upgrade %s" % node) return self.success() def install(self, node, version, start=1, flags="--force"): target_dir = "/tmp/rpm-%s" % version - src_dir = "%s/%s" % (self.Env["rpm-dir"], version) + src_dir = "%s/%s" % (self._env["rpm-dir"], version) - self.logger.log("Installing %s on %s with %s" % (version, node, flags)) - if not self.stop(node): + self._logger.log("Installing %s on %s with %s" % (version, node, flags)) + if not self._stop(node): return self.failure("stop failure: "+node) - self.rsh(node, "mkdir -p %s" % target_dir) - self.rsh(node, "rm -f %s/*.rpm" % target_dir) - (_, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, verbose=1) + self._rsh(node, "mkdir -p %s" % target_dir) + self._rsh(node, "rm -f %s/*.rpm" % target_dir) + (_, lines) = self._rsh(node, "ls -1 %s/*.rpm" % src_dir, verbose=1) for line in lines: line = line[:-1] - rc = self.rsh.copy("%s" % (line), "%s:%s/" % (node, target_dir)) - self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) + rc = self._rsh.copy("%s" % (line), "%s:%s/" % (node, target_dir)) + self._rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) - if start and not self.start(node): + if start and not self._start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): - return self.install(node, self.Env["current-version"], start) + return self.install(node, self._env["current-version"], start) def downgrade(self, node, start=1): - return self.install(node, self.Env["previous-version"], start, "--force --nodeps") + return self.install(node, self._env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform
the 'Rolling Upgrade' test. ''' self.incr("calls") - for node in self.Env["nodes"]: + for node in self._env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) - self.CM.cluster_stable() + self._cm.cluster_stable() return self.success() def is_applicable(self): - if not self.is_applicable_common(): + if not CTSTest.is_applicable(self): return None - if not "rpm-dir" in list(self.Env.keys()): + if not "rpm-dir" in list(self._env.keys()): return None - if not "current-version" in list(self.Env.keys()): + if not "current-version" in list(self._env.keys()): return None - if not "previous-version" in list(self.Env.keys()): + if not "previous-version" in list(self._env.keys()): return None return 1 # Register RollingUpgradeTest as a good test to run AllTestClasses.append(RollingUpgradeTest) class BSC_AddResource(CTSTest): '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "AddResource" self.resource_offset = 0 self.cib_cmd = """cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) - watch = self.create_watch(patterns, self.Env["DeadTime"]) + watch = self.create_watch(patterns, self._env["DeadTime"]) watch.set_watch() ip = self.NextIP() if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.look_for_all() if watch.unmatched: for regex in watch.unmatched: - self.logger.log ("Warn: Pattern not found: %s" % (regex)) + self._logger.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") - if not self.CM.cluster_stable(self.Env["DeadTime"]): + if not self._cm.cluster_stable(self._env["DeadTime"]): return self.failure("Unstable cluster") return self.success() def NextIP(self): - ip = self.Env["IPBase"] + ip = self._env["IPBase"] # rpartition() returns an immutable tuple, so convert to a list before editing if ":" in ip: fields = list(ip.rpartition(":")) fields[2] = "%x" % (int(fields[2], 16) + 1) else: fields = list(ip.rpartition('.')) fields[2] = str(int(fields[2]) + 1) ip = fields[0] + fields[1] + fields[2] - self.Env["IPBase"] = ip + self._env["IPBase"] = ip return ip.strip() def make_ip_resource(self, node, id, rclass, type, ip): - self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node)) + self._logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint = """ """ % (id, id, id, id, node) rc = 0 - (rc, _) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), verbose=1) + (rc, _) = self._rsh(node, self.cib_cmd % ("constraints", node_constraint), verbose=1) if rc != 0: - self.logger.log("Constraint creation failed: %d" % rc) + self._logger.log("Constraint creation failed: %d" % rc) return None - (rc, _) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), verbose=1) + (rc, _) = self._rsh(node, self.cib_cmd % ("resources", rsc_xml), verbose=1) if rc != 0: - self.logger.log("Resource creation failed: %d" % rc) + self._logger.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): - if self.Env["DoBSC"]: + if self._env["DoBSC"]: return True return None AllTestClasses.append(BSC_AddResource) -class SimulStopLite(CTSTest): - '''Stop any active nodes ~ simultaneously''' - def
__init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SimulStopLite" - - def __call__(self, dummy): - '''Perform the 'SimulStopLite' setup work. ''' - self.incr("calls") - - self.debug("Setup: " + self.name) - - # We ignore the "node" parameter... - watchpats = [ ] - - for node in self.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "up": - self.incr("WasStarted") - watchpats.append(self.templates["Pat:We_stopped"] % node) - - if len(watchpats) == 0: - return self.success() - - # Stop all the nodes - at about the same time... - watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - - watch.set_watch() - self.set_timer() - for node in self.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "up": - self.CM.StopaCMnoBlock(node) - if watch.look_for_all(): - # Make sure they're completely down with no residule - for node in self.Env["nodes"]: - self.rsh(node, self.templates["StopCmd"]) - - return self.success() - - did_fail = 0 - up_nodes = [] - for node in self.Env["nodes"]: - if self.CM.StataCM(node) == 1: - did_fail = 1 - up_nodes.append(node) - - if did_fail: - return self.failure("Active nodes exist: " + repr(up_nodes)) - - self.logger.log("Warn: All nodes stopped but CTS didn't detect: " - + repr(watch.unmatched)) - - return self.failure("Missing log message: "+repr(watch.unmatched)) - - def is_applicable(self): - '''SimulStopLite is a setup test and never applicable''' - return False - - -class SimulStartLite(CTSTest): - '''Start any stopped nodes ~ simultaneously''' - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = "SimulStartLite" - - def __call__(self, dummy): - '''Perform the 'SimulStartList' setup work. ''' - self.incr("calls") - self.debug("Setup: " + self.name) - - # We ignore the "node" parameter... - node_list = [] - for node in self.Env["nodes"]: - if self.CM.ShouldBeStatus[node] == "down": - self.incr("WasStopped") - node_list.append(node) - - self.set_timer() - while len(node_list) > 0: - # Repeat until all nodes come up - watchpats = [ ] - - uppat = self.templates["Pat:NonDC_started"] - if self.CM.upcount() == 0: - uppat = self.templates["Pat:Local_started"] - - watchpats.append(self.templates["Pat:DC_IDLE"]) - for node in node_list: - watchpats.append(uppat % node) - watchpats.append(self.templates["Pat:InfraUp"] % node) - watchpats.append(self.templates["Pat:PacemakerUp"] % node) - - # Start all the nodes - at about the same time... 
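Both removed helpers follow the same log-watch idiom used by every test in this file: register the expected log patterns before acting, then act on all nodes at once, then block until each pattern has been seen. Reduced to a sketch (create_watch() and the cluster-manager calls are assumed from the surrounding class):

# Sketch of the watch idiom used throughout this file.
def watched_action(self, patterns, timeout, action):
    watch = self.create_watch(patterns, timeout)
    watch.set_watch()               # start tailing logs *before* acting
    action()                        # e.g. self._cm.StartaCMnoBlock(node)
    if not watch.look_for_all():    # wait until every pattern matches, or time out
        return watch.unmatched      # leftover patterns indicate a failure
    return None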
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) - watch.set_watch() - - stonith = self.CM.prepare_fencing_watcher(self.name) - - for node in node_list: - self.CM.StartaCMnoBlock(node) - - watch.look_for_all() - - node_list = self.CM.fencing_cleanup(self.name, stonith) - - if node_list == None: - return self.failure("Cluster did not stabilize") - - # Remove node_list messages from watch.unmatched - for node in node_list: - self.logger.debug("Dealing with stonith operations for %s" % repr(node_list)) - if watch.unmatched: - try: - watch.unmatched.remove(uppat % node) - except: - self.debug("Already matched: %s" % (uppat % node)) - try: - watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) - except: - self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) - try: - watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) - except: - self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) - - if watch.unmatched: - for regex in watch.unmatched: - self.logger.log ("Warn: Startup pattern not found: %s" %(regex)) - - if not self.CM.cluster_stable(): - return self.failure("Cluster did not stabilize") - - did_fail = 0 - unstable = [] - for node in self.Env["nodes"]: - if self.CM.StataCM(node) == 0: - did_fail = 1 - unstable.append(node) - - if did_fail: - return self.failure("Unstarted nodes exist: " + repr(unstable)) - - unstable = [] - for node in self.Env["nodes"]: - if not self.CM.node_stable(node): - did_fail = 1 - unstable.append(node) - - if did_fail: - return self.failure("Unstable cluster nodes exist: " + repr(unstable)) - - return self.success() - - def is_applicable(self): - '''SimulStartLite is a setup test and never applicable''' - return False - - def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): - bound_test.Audits = audits + bound_test.audits = audits result.append(bound_test) return result class RemoteLXC(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteLXC" - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) + self._start = StartTest(cm) + self._startall = SimulStartLite(cm) self.num_containers = 2 - self.is_container = 1 - self.failed = 0 + self.is_container = True self.fail_string = "" def start_lxc_simple(self, node): # restore any artifacts laying around from a previous test. 
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") + self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") # generate the containers, put them in the config, add some resources to them pats = [ ] watch = self.create_watch(pats, 120) watch.set_watch() pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms")) - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) - self.set_timer("remoteSimpleInit") - watch.look_for_all() - self.log_timer("remoteSimpleInit") + self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) + + with Timer(self._logger, self.name, "remoteSimpleInit"): + watch.look_for_all() + if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 + self.failed = True def cleanup_lxc_simple(self, node): pats = [ ] # if the test failed, attempt to clean up the cib and libvirt environment # as best as possible - if self.failed == 1: + if self.failed: # restore libvirt and cib - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") + self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") return watch = self.create_watch(pats, 120) watch.set_watch() pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2")) - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") - self.set_timer("remoteSimpleCleanup") - watch.look_for_all() - self.log_timer("remoteSimpleCleanup") + self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") + + with Timer(self._logger, self.name, "remoteSimpleCleanup"): + watch.look_for_all() if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 + self.failed = True # cleanup libvirt - self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") + self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") def __call__(self, node): '''Perform the 'RemoteLXC' test. ''' self.incr("calls") - ret = self.startall(None) + ret = self._startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") - (rc, _) = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") + (rc, _) = self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") if rc == 1: self.log("Environment test for lxc support failed.") return self.skipped() self.start_lxc_simple(node) self.cleanup_lxc_simple(node) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() - if self.failed == 1: + if self.failed: return self.failure(self.fail_string) return self.success() - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ - r"Updating failcount for ping", - r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)", - # The orphaned lxc-ms resource causes an expected transition error - # that is a result of the scheduler not having knowledge that the - # promotable resource used to be a clone. 
As a result, it looks like that - # resource is running in multiple locations when it shouldn't... But in - # this instance we know why this error is occurring and that it is expected. - r"Calculated [Tt]ransition .*pe-error", - r"Resource lxc-ms .* is active on 2 nodes attempting recovery", - r"Unknown operation: fail", - r"VirtualDomain.*ERROR: Unable to determine emulator", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"Updating failcount for ping", + r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)", + # The orphaned lxc-ms resource causes an expected transition error + # that is a result of the scheduler not having knowledge that the + # promotable resource used to be a clone. As a result, it looks like that + # resource is running in multiple locations when it shouldn't... But in + # this instance we know why this error is occurring and that it is expected. + r"Calculated [Tt]ransition .*pe-error", + r"Resource lxc-ms .* is active on 2 nodes attempting recovery", + r"Unknown operation: fail", + r"VirtualDomain.*ERROR: Unable to determine emulator" ] AllTestClasses.append(RemoteLXC) -class RemoteDriver(CTSTest): - - def __init__(self, cm): - CTSTest.__init__(self,cm) - self.name = self.__class__.__name__ - self.start = StartTest(cm) - self.startall = SimulStartLite(cm) - self.stop = StopTest(cm) - self.remote_rsc = "remote-rsc" - self.cib_cmd = """cibadmin -C -o %s -X '%s' """ - self.reset() - - def reset(self): - self.pcmk_started = 0 - self.failed = False - self.fail_string = "" - self.remote_node_added = 0 - self.remote_rsc_added = 0 - self.remote_use_reconnect_interval = self.Env.random_gen.choice([True,False]) - - def fail(self, msg): - """ Mark test as failed. """ - - self.failed = True - - # Always log the failure. - self.logger.log(msg) - - # Use first failure as test status, as it's likely to be most useful. - if not self.fail_string: - self.fail_string = msg - - def get_othernode(self, node): - for othernode in self.Env["nodes"]: - if othernode == node: - # we don't want to try and use the cib that we just shutdown. - # find a cluster node that is not our soon to be remote-node. 
- continue - else: - return othernode - - def del_rsc(self, node, rsc): - othernode = self.get_othernode(node) - (rc, _) = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) - if rc != 0: - self.fail("Removal of resource '%s' failed" % rsc) - - def add_rsc(self, node, rsc_xml): - othernode = self.get_othernode(node) - (rc, _) = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) - if rc != 0: - self.fail("resource creation failed") - - def add_primitive_rsc(self, node): - rsc_xml = """ - - - - - -""" % { "node": self.remote_rsc } - self.add_rsc(node, rsc_xml) - if not self.failed: - self.remote_rsc_added = 1 - - def add_connection_rsc(self, node): - rsc_xml = """ - - - -""" % { "node": self.remote_node, "server": node } - - if self.remote_use_reconnect_interval: - # Set reconnect interval on resource - rsc_xml = rsc_xml + """ - -""" % (self.remote_node) - - rsc_xml = rsc_xml + """ - - - - - - -""" % { "node": self.remote_node } - - self.add_rsc(node, rsc_xml) - if not self.failed: - self.remote_node_added = 1 - - def disable_services(self, node): - self.corosync_enabled = self.Env.service_is_enabled(node, "corosync") - if self.corosync_enabled: - self.Env.disable_service(node, "corosync") - - self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker") - if self.pacemaker_enabled: - self.Env.disable_service(node, "pacemaker") - - def restore_services(self, node): - if self.corosync_enabled: - self.Env.enable_service(node, "corosync") - - if self.pacemaker_enabled: - self.Env.enable_service(node, "pacemaker") - - def stop_pcmk_remote(self, node): - # disable pcmk remote - for i in range(10): - (rc, _) = self.rsh(node, "service pacemaker_remote stop") - if rc != 0: - time.sleep(6) - else: - break - - def start_pcmk_remote(self, node): - for i in range(10): - (rc, _) = self.rsh(node, "service pacemaker_remote start") - if rc != 0: - time.sleep(6) - else: - self.pcmk_started = 1 - break - - def freeze_pcmk_remote(self, node): - """ Simulate a Pacemaker Remote daemon failure. """ - - # We freeze the process. - self.rsh(node, "killall -STOP pacemaker-remoted") - - def resume_pcmk_remote(self, node): - # We resume the process. - self.rsh(node, "killall -CONT pacemaker-remoted") - - def start_metal(self, node): - # Cluster nodes are reused as remote nodes in remote tests. If cluster - # services were enabled at boot, in case the remote node got fenced, the - # cluster node would join instead of the expected remote one. Meanwhile - # pacemaker_remote would not be able to start. Depending on the chances, - # the situations might not be able to be orchestrated gracefully any more. - # - # Temporarily disable any enabled cluster serivces. 
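disable_services()/restore_services() above exist because a cluster node being reused as a remote node must not boot back into the full cluster stack if it gets fenced. The CTS environment hides the mechanics behind self.Env.disable_service()/enable_service(); a plausible implementation underneath is just systemctl over ssh (the helper below is hypothetical):

import subprocess

def set_service_enabled(node, service, enable):
    # Hypothetical helper: flip a service's boot-time state on a remote host.
    verb = "enable" if enable else "disable"
    subprocess.run(["ssh", node, "systemctl %s %s" % (verb, service)], check=False)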
- self.disable_services(node) - - pcmk_started = 0 - - # make sure the resource doesn't already exist for some reason - self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc)) - self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node)) - - if not self.stop(node): - self.fail("Failed to shutdown cluster node %s" % node) - return - - self.start_pcmk_remote(node) - - if self.pcmk_started == 0: - self.fail("Failed to start pacemaker_remote on node %s" % node) - return - - # Convert node to baremetal now that it has shutdown the cluster stack - pats = [ ] - watch = self.create_watch(pats, 120) - watch.set_watch() - pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) - pats.append(self.templates["Pat:DC_IDLE"]) - - self.add_connection_rsc(node) - - self.set_timer("remoteMetalInit") - watch.look_for_all() - self.log_timer("remoteMetalInit") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - - def migrate_connection(self, node): - if self.failed: - return - - pats = [ ] - pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node)) - pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node)) - pats.append(self.templates["Pat:DC_IDLE"]) - watch = self.create_watch(pats, 120) - watch.set_watch() - - (rc, _) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("failed to move remote node connection resource") - return - - self.set_timer("remoteMetalMigrate") - watch.look_for_all() - self.log_timer("remoteMetalMigrate") - - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - return - - def fail_rsc(self, node): - if self.failed: - return - - watchpats = [ ] - watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node)) - watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) - watchpats.append(self.templates["Pat:DC_IDLE"]) - - watch = self.create_watch(watchpats, 120) - watch.set_watch() - - self.debug("causing dummy rsc to fail.") - - self.rsh(node, "rm -f /var/run/resource-agents/Dummy*") - - self.set_timer("remoteRscFail") - watch.look_for_all() - self.log_timer("remoteRscFail") - if watch.unmatched: - self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) - - def fail_connection(self, node): - if self.failed: - return - - watchpats = [ ] - watchpats.append(self.templates["Pat:Fencing_ok"] % self.remote_node) - watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node) - - watch = self.create_watch(watchpats, 120) - watch.set_watch() - - # freeze the pcmk remote daemon. this will result in fencing - self.debug("Force stopped active remote node") - self.freeze_pcmk_remote(node) - - self.debug("Waiting for remote node to be fenced.") - self.set_timer("remoteMetalFence") - watch.look_for_all() - self.log_timer("remoteMetalFence") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - return - - self.debug("Waiting for the remote node to come back up") - self.CM.ns.wait_for_node(node, 120); - - pats = [ ] - watch = self.create_watch(pats, 240) - watch.set_watch() - pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) - if self.remote_rsc_added == 1: - pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) - - # start the remote node again watch it integrate back into cluster. 
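
Both stop_pcmk_remote/start_pcmk_remote above and their underscored replacements later in this diff share the same retry-with-sleep shape: try the service command up to ten times, sleeping between failures. A runnable sketch of that shape; retry_cmd and fake_run are illustrative names standing in for the test's rsh callable, not part of the CTS API:

    import time

    def retry_cmd(run, cmd, attempts=10, delay=6):
        """ Run cmd up to attempts times, sleeping delay seconds after
            each nonzero return code; report whether it ever succeeded. """
        for _ in range(attempts):
            if run(cmd) == 0:
                return True
            time.sleep(delay)
        return False

    # Stub runner that fails twice before succeeding:
    calls = []
    def fake_run(cmd):
        calls.append(cmd)
        return 0 if len(calls) >= 3 else 1

    print(retry_cmd(fake_run, "service pacemaker_remote start", delay=0))  # True
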
- self.start_pcmk_remote(node) - if self.pcmk_started == 0: - self.fail("Failed to start pacemaker_remote on node %s" % node) - return - - self.debug("Waiting for remote node to rejoin cluster after being fenced.") - self.set_timer("remoteMetalRestart") - watch.look_for_all() - self.log_timer("remoteMetalRestart") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - return - - def add_dummy_rsc(self, node): - if self.failed: - return - - # verify we can put a resource on the remote node - pats = [ ] - watch = self.create_watch(pats, 120) - watch.set_watch() - pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) - pats.append(self.templates["Pat:DC_IDLE"]) - - # Add a resource that must live on remote-node - self.add_primitive_rsc(node) - - # force that rsc to prefer the remote node. - (rc, _) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to place remote resource on remote node.") - return - - self.set_timer("remoteMetalRsc") - watch.look_for_all() - self.log_timer("remoteMetalRsc") - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - - def test_attributes(self, node): - if self.failed: - return - - # This verifies permanent attributes can be set on a remote-node. It also - # verifies the remote-node can edit its own cib node section remotely. - (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line)) - return - - (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to get remote-node attribute") - return - - (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), verbose=1) - if rc != 0: - self.fail("Failed to delete remote-node attribute") - return - - def cleanup_metal(self, node): - self.restore_services(node) - - if self.pcmk_started == 0: - return - - pats = [ ] - - watch = self.create_watch(pats, 120) - watch.set_watch() - - if self.remote_rsc_added == 1: - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc)) - if self.remote_node_added == 1: - pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node)) - - self.set_timer("remoteMetalCleanup") - - self.resume_pcmk_remote(node) - - if self.remote_rsc_added == 1: - - # Remove dummy resource added for remote node tests - self.debug("Cleaning up dummy rsc put on remote node") - self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc) - self.del_rsc(node, self.remote_rsc) - - if self.remote_node_added == 1: - - # Remove remote node's connection resource - self.debug("Cleaning up remote node connection resource") - self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node)) - self.del_rsc(node, self.remote_node) - - watch.look_for_all() - self.log_timer("remoteMetalCleanup") - - if watch.unmatched: - self.fail("Unmatched patterns: %s" % watch.unmatched) - - self.stop_pcmk_remote(node) - - self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() - - if self.remote_node_added == 1: - # Remove remote node itself - self.debug("Cleaning up node entry for remote node") - self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node) - - def setup_env(self, node): - 
- self.remote_node = "remote-%s" % (node) - - # we are assuming if all nodes have a key, that it is - # the right key... If any node doesn't have a remote - # key, we regenerate it everywhere. - if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]): - return - - # create key locally - (handle, keyfile) = tempfile.mkstemp(".cts") - os.close(handle) - subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"], - stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - - # sync key throughout the cluster - for node in self.Env["nodes"]: - self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker") - self.rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % node) - self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey") - self.rsh(node, "chmod 0640 /etc/pacemaker/authkey") - os.unlink(keyfile) - - def is_applicable(self): - if not self.is_applicable_common(): - return False - - for node in self.Env["nodes"]: - (rc, _) = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1") - if rc != 0: - return False - return True - - def start_new_test(self, node): - self.incr("calls") - self.reset() - - ret = self.startall(None) - if not ret: - return self.failure("setup failed: could not start all nodes") - - self.setup_env(node) - self.start_metal(node) - self.add_dummy_rsc(node) - return True - - def __call__(self, node): - return self.failure("This base class is not meant to be called directly.") - - def errorstoignore(self): - '''Return list of errors which should be ignored''' - return [ r"""is running on remote.*which isn't allowed""", - r"""Connection terminated""", - r"""Could not send remote""", - ] - -# RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses - - class RemoteBasic(RemoteDriver): + def __init__(self, cm): + RemoteDriver.__init__(self, cm) + self.name = "RemoteBasic" def __call__(self, node): '''Perform the 'RemoteBaremetal' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.test_attributes(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteBasic) class RemoteStonithd(RemoteDriver): + def __init__(self, cm): + RemoteDriver.__init__(self, cm) + self.name = "RemoteStonithd" def __call__(self, node): '''Perform the 'RemoteStonithd' test. 
''' if not self.start_new_test(node): return self.failure(self.fail_string) self.fail_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def is_applicable(self): if not RemoteDriver.is_applicable(self): return False - if "DoFencing" in list(self.Env.keys()): - return self.Env["DoFencing"] + if "DoFencing" in list(self._env.keys()): + return self._env["DoFencing"] return True - def errorstoignore(self): - ignore_pats = [ - r"Lost connection to Pacemaker Remote node", - r"Software caused connection abort", - r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor", - r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*", - r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)", - r"error: Result of monitor operation for .* on remote-.*: Internal communication failure", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ - ignore_pats.extend(RemoteDriver.errorstoignore(self)) - return ignore_pats + return [ r"Lost connection to Pacemaker Remote node", + r"Software caused connection abort", + r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor", + r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*", + r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)", + r"error: Result of monitor operation for .* on remote-.*: Internal communication failure" ] + super().errors_to_ignore AllTestClasses.append(RemoteStonithd) class RemoteMigrate(RemoteDriver): + def __init__(self, cm): + RemoteDriver.__init__(self, cm) + self.name = "RemoteMigrate" def __call__(self, node): '''Perform the 'RemoteMigrate' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.migrate_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def is_applicable(self): if not RemoteDriver.is_applicable(self): return 0 # This test requires at least three nodes: one to convert to a # remote node, one to host the connection originally, and one # to migrate the connection to. - if len(self.Env["nodes"]) < 3: + if len(self._env["nodes"]) < 3: return 0 return 1 AllTestClasses.append(RemoteMigrate) class RemoteRscFailure(RemoteDriver): + def __init__(self, cm): + RemoteDriver.__init__(self, cm) + self.name = "RemoteRscFailure" def __call__(self, node): '''Perform the 'RemoteRscFailure' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) # This is an important step. We are migrating the connection # before failing the resource. This verifies that the migration # has properly maintained control over the remote-node. 
self.migrate_connection(node) self.fail_rsc(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") - self.CM.cluster_stable() + self._cm.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() - def errorstoignore(self): - ignore_pats = [ - r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)", - r"Dummy.*: No process state file found", - ] + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ - ignore_pats.extend(RemoteDriver.errorstoignore(self)) - return ignore_pats + return [ r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)", + r"Dummy.*: No process state file found" ] + super().errors_to_ignore def is_applicable(self): if not RemoteDriver.is_applicable(self): return 0 # This test requires at least three nodes: one to convert to a # remote node, one to host the connection originally, and one # to migrate the connection to. - if len(self.Env["nodes"]) < 3: + if len(self._env["nodes"]) < 3: return 0 return 1 AllTestClasses.append(RemoteRscFailure) # vim:ts=4:sw=4:et: diff --git a/python/pacemaker/_cts/Makefile.am b/python/pacemaker/_cts/Makefile.am index c3ded161e5..3d24891fb5 100644 --- a/python/pacemaker/_cts/Makefile.am +++ b/python/pacemaker/_cts/Makefile.am @@ -1,26 +1,29 @@ # # Copyright 2023 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # MAINTAINERCLEANFILES = Makefile.in pkgpythondir = $(pythondir)/$(PACKAGE)/_cts pkgpython_PYTHON = CTS.py \ __init__.py \ audits.py \ corosync.py \ environment.py \ errors.py \ input.py \ logging.py \ patterns.py \ process.py \ remote.py \ test.py \ + timer.py \ watcher.py + +SUBDIRS = tests diff --git a/python/pacemaker/_cts/tests/Makefile.am b/python/pacemaker/_cts/tests/Makefile.am new file mode 100644 index 0000000000..2a9bd7d134 --- /dev/null +++ b/python/pacemaker/_cts/tests/Makefile.am @@ -0,0 +1,20 @@ +# +# Copyright 2023 the Pacemaker project contributors +# +# The version control history for this file may have further details. +# +# This source code is licensed under the GNU General Public License version 2 +# or later (GPLv2+) WITHOUT ANY WARRANTY. +# + +MAINTAINERCLEANFILES = Makefile.in + +pkgpythondir = $(pythondir)/$(PACKAGE)/_cts/tests + +pkgpython_PYTHON = __init__.py \ + ctstest.py \ + remotedriver.py \ + simulstartlite.py \ + simulstoplite.py \ + starttest.py \ + stoptest.py diff --git a/python/pacemaker/_cts/tests/__init__.py b/python/pacemaker/_cts/tests/__init__.py new file mode 100644 index 0000000000..82fa9ba362 --- /dev/null +++ b/python/pacemaker/_cts/tests/__init__.py @@ -0,0 +1,13 @@ +""" +Test classes for the `pacemaker._cts` package. 
+""" + +__copyright__ = "Copyright 2023 the Pacemaker project contributors" +__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)" + +from pacemaker._cts.tests.ctstest import CTSTest +from pacemaker._cts.tests.remotedriver import RemoteDriver +from pacemaker._cts.tests.simulstartlite import SimulStartLite +from pacemaker._cts.tests.simulstoplite import SimulStopLite +from pacemaker._cts.tests.starttest import StartTest +from pacemaker._cts.tests.stoptest import StopTest diff --git a/python/pacemaker/_cts/tests/ctstest.py b/python/pacemaker/_cts/tests/ctstest.py new file mode 100644 index 0000000000..f2fe8bf710 --- /dev/null +++ b/python/pacemaker/_cts/tests/ctstest.py @@ -0,0 +1,290 @@ +""" Base classes for CTS tests """ + +__all__ = ["CTSTest"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +import re + +from pacemaker._cts.audits import AuditConstraint, AuditResource +from pacemaker._cts.environment import EnvFactory +from pacemaker._cts.logging import LogFactory +from pacemaker._cts.patterns import PatternSelector +from pacemaker._cts.remote import RemoteFactory +from pacemaker._cts.timer import Timer +from pacemaker._cts.watcher import LogWatcher + +# Disable various pylint warnings that occur in so many places throughout this +# file it's easiest to just take care of them globally. This does introduce the +# possibility that we'll miss some other cause of the same warning, but we'll +# just have to be careful. + +# pylint doesn't understand that self._rsh is callable. +# pylint: disable=not-callable + + +class CTSTest: + """ The base class for all cluster tests. This implements a basic set of + properties and behaviors like setup, tear down, time keeping, and + statistics tracking. It is up to specific tests to implement their own + specialized behavior on top of this class. 
+ """ + + def __init__(self, cm): + """ Create a new CTSTest instance + + Arguments: + + cm -- A ClusterManager instance + """ + + # pylint: disable=invalid-name + + self.audits = [] + self.name = None + self.templates = PatternSelector(cm["Name"]) + + self.stats = { "auditfail": 0, + "calls": 0, + "failure": 0, + "skipped": 0, + "success": 0 } + + self._cm = cm + self._env = EnvFactory().getInstance() + self._r_o2cb = None + self._r_ocfs2 = [] + self._rsh = RemoteFactory().getInstance() + self._logger = LogFactory() + self._timers = {} + + self.benchmark = True # which tests to benchmark + self.failed = False + self.is_container = False + self.is_experimental = False + self.is_loop = False + self.is_unsafe = False + self.is_valgrind = False + self.passed = True + + def log(self, args): + """ Log a message """ + + self._logger.log(args) + + def debug(self, args): + """ Log a debug message """ + + self._logger.debug(args) + + def get_timer(self, key="test"): + """ Get the start time of the given timer """ + + try: + return self._timers[key].start_time + except KeyError: + return 0 + + def set_timer(self, key="test"): + """ Set the start time of the given timer to now, and return + that time + """ + + if key not in self._timers: + self._timers[key] = Timer(self._logger, self.name, key) + + self._timers[key].start() + return self._timers[key].start_time + + def log_timer(self, key="test"): + """ Log the elapsed time of the given timer """ + + if key not in self._timers: + return + + elapsed = self._timers[key].elapsed + self.debug("%s:%s runtime: %.2f" % (self.name, key, elapsed)) + del self._timers[key] + + def incr(self, name): + """ Increment the given stats key """ + + if name not in self.stats: + self.stats[name] = 0 + + self.stats[name] += 1 + + # Reset the test passed boolean + if name == "calls": + self.passed = True + + def failure(self, reason="none"): + """ Increment the failure count, with an optional failure reason """ + + self.passed = False + self.incr("failure") + self._logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) + + return False + + def success(self): + """ Increment the success count """ + + self.incr("success") + return True + + def skipped(self): + """ Increment the skipped count """ + + self.incr("skipped") + return True + + def __call__(self, node): + """ Perform this test """ + + raise NotImplementedError + + def audit(self): + """ Perform all the relevant audits (see ClusterAudit), returning + whether or not they all passed. + """ + + passed = True + + for audit in self.audits: + if not audit(): + self._logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name)) + self.incr("auditfail") + passed = False + + return passed + + def setup(self, node): + """ Setup this test """ + + # node is used in subclasses + # pylint: disable=unused-argument + + return self.success() + + def teardown(self, node): + """ Tear down this test """ + + # node is used in subclasses + # pylint: disable=unused-argument + + return self.success() + + def create_watch(self, patterns, timeout, name=None): + """ Create a new LogWatcher object with the given patterns, timeout, + and optional name. This object can be used to search log files + for matching patterns during this test's run. 
+ """ + if not name: + name = self.name + + return LogWatcher(self._env["LogFileName"], patterns, self._env["nodes"], self._env["LogWatcher"], name, timeout) + + def local_badnews(self, prefix, watch, local_ignore=None): + """ Use the given watch object to search through log files for messages + starting with the given prefix. If no prefix is given, use + "LocalBadNews:" by default. The optional local_ignore list should + be a list of regexes that, if found in a line, will cause that line + to be ignored. + + Return the number of matches found. + """ + errcount = 0 + if not prefix: + prefix = "LocalBadNews:" + + ignorelist = [" CTS: ", prefix] + + if local_ignore: + ignorelist += local_ignore + + while errcount < 100: + match = watch.look(0) + if match: + add_err = True + + for ignore in ignorelist: + if add_err and re.search(ignore, match): + add_err = False + + if add_err: + self._logger.log("%s %s" % (prefix, match)) + errcount += 1 + else: + break + else: + self._logger.log("Too many errors!") + + watch.end() + return errcount + + def is_applicable(self): + """ Return True if this test is applicable in the current test configuration. + This method must be implemented by all subclasses. + """ + + if self.is_loop and not self._env["loop-tests"]: + return False + + if self.is_unsafe and not self._env["unsafe-tests"]: + return False + + if self.is_valgrind and not self._env["valgrind-tests"]: + return False + + if self.is_experimental and not self._env["experimental-tests"]: + return False + + if self.is_container and not self._env["container-tests"]: + return False + + if self._env["benchmark"] and not self.benchmark: + return False + + return True + + def _find_ocfs2_resources(self, node): + """ Find any OCFS2 filesystems mounted on the given cluster node, + populating the internal self._r_ocfs2 list with them and returning + the number of OCFS2 filesystems. 
+ """ + + self._r_o2cb = None + self._r_ocfs2 = [] + + (_, lines) = self._rsh(node, "crm_resource -c", verbose=1) + for line in lines: + if re.search("^Resource", line): + r = AuditResource(self._cm, line) + + if r.rtype == "o2cb" and r.parent != "NA": + self.debug("Found o2cb: %s" % self._r_o2cb) + self._r_o2cb = r.parent + + if re.search("^Constraint", line): + c = AuditConstraint(self._cm, line) + + if c.type == "rsc_colocation" and c.target == self._r_o2cb: + self._r_ocfs2.append(c.rsc) + + self.debug("Found ocfs2 filesystems: %s" % self._r_ocfs2) + return len(self._r_ocfs2) + + def can_run_now(self, node): + """ Return True if we can meaningfully run right now """ + + # node is used in subclasses + # pylint: disable=unused-argument + + return True + + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [] diff --git a/python/pacemaker/_cts/tests/remotedriver.py b/python/pacemaker/_cts/tests/remotedriver.py new file mode 100644 index 0000000000..852976113d --- /dev/null +++ b/python/pacemaker/_cts/tests/remotedriver.py @@ -0,0 +1,533 @@ +""" Base classes for CTS tests """ + +__all__ = ["RemoteDriver"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +import os +import time +import subprocess +import tempfile + +from pacemaker._cts.tests.ctstest import CTSTest +from pacemaker._cts.tests.simulstartlite import SimulStartLite +from pacemaker._cts.tests.starttest import StartTest +from pacemaker._cts.tests.stoptest import StopTest +from pacemaker._cts.timer import Timer + +# Disable various pylint warnings that occur in so many places throughout this +# file it's easiest to just take care of them globally. This does introduce the +# possibility that we'll miss some other cause of the same warning, but we'll +# just have to be careful. + +# pylint doesn't understand that self._rsh is callable. +# pylint: disable=not-callable + + +class RemoteDriver(CTSTest): + """ A specialized base class for cluster tests that run on Pacemaker + Remote nodes. This builds on top of CTSTest to provide methods + for starting and stopping services and resources, and managing + remote nodes. This is still just an abstract class -- specific + tests need to implement their own specialized behavior. + """ + + def __init__(self, cm): + """ Create a new RemoteDriver instance + + Arguments: + + cm -- A ClusterManager instance + """ + + CTSTest.__init__(self,cm) + self.name = "RemoteDriver" + + self._corosync_enabled = False + self._pacemaker_enabled = False + self._remote_node = None + self._remote_rsc = "remote-rsc" + self._start = StartTest(cm) + self._startall = SimulStartLite(cm) + self._stop = StopTest(cm) + + self.reset() + + def reset(self): + """ Reset the state of this test back to what it was before the test + was run + """ + + self.failed = False + self.fail_string = "" + + self._pcmk_started = False + self._remote_node_added = False + self._remote_rsc_added = False + self._remote_use_reconnect_interval = self._env.random_gen.choice([True,False]) + + def fail(self, msg): + """ Mark test as failed """ + + self.failed = True + + # Always log the failure. + self._logger.log(msg) + + # Use first failure as test status, as it's likely to be most useful. + if not self.fail_string: + self.fail_string = msg + + def _get_other_node(self, node): + """ Get the first cluster node out of the environment that is not the + given node. 
Typically, this is used to find some node that will + still be active that we can run cluster commands on. + """ + + for othernode in self._env["nodes"]: + if othernode == node: + # we don't want to try and use the cib that we just shutdown. + # find a cluster node that is not our soon to be remote-node. + continue + + return othernode + + def _del_rsc(self, node, rsc): + """ Delete the given named resource from the cluster. The given `node` + is the cluster node on which we should *not* run the delete command. + """ + + othernode = self._get_other_node(node) + (rc, _) = self._rsh(othernode, "crm_resource -D -r %s -t primitive" % rsc) + if rc != 0: + self.fail("Removal of resource '%s' failed" % rsc) + + def _add_rsc(self, node, rsc_xml): + """ Add a resource given in XML format to the cluster. The given `node` + is the cluster node on which we should *not* run the add command. + """ + + othernode = self._get_other_node(node) + (rc, _) = self._rsh(othernode, "cibadmin -C -o resources -X '%s'" % rsc_xml) + if rc != 0: + self.fail("resource creation failed") + + def _add_primitive_rsc(self, node): + """ Add a primitive heartbeat resource for the remote node to the + cluster. The given `node` is the cluster node on which we should + *not* run the add command. + """ + + rsc_xml = """ + + + + + +""" % { "node": self._remote_rsc } + + self._add_rsc(node, rsc_xml) + if not self.failed: + self._remote_rsc_added = True + + def _add_connection_rsc(self, node): + """ Add a primitive connection resource for the remote node to the + cluster. The given `node` is teh cluster node on which we should + *not* run the add command. + """ + + rsc_xml = """ + + + +""" % { "node": self._remote_node, "server": node } + + if self._remote_use_reconnect_interval: + # Set reconnect interval on resource + rsc_xml += """ + +""" % self._remote_node + + rsc_xml += """ + + + + + + +""" % { "node": self._remote_node } + + self._add_rsc(node, rsc_xml) + if not self.failed: + self._remote_node_added = True + + def _disable_services(self, node): + """ Disable the corosync and pacemaker services on the given node """ + + self._corosync_enabled = self._env.service_is_enabled(node, "corosync") + if self._corosync_enabled: + self._env.disable_service(node, "corosync") + + self._pacemaker_enabled = self._env.service_is_enabled(node, "pacemaker") + if self._pacemaker_enabled: + self._env.disable_service(node, "pacemaker") + + def _enable_services(self, node): + """ Enable the corosync and pacemaker services on the given node """ + + if self._corosync_enabled: + self._env.enable_service(node, "corosync") + + if self._pacemaker_enabled: + self._env.enable_service(node, "pacemaker") + + def _stop_pcmk_remote(self, node): + """ Stop the Pacemaker Remote service on the given node """ + + for _ in range(10): + (rc, _) = self._rsh(node, "service pacemaker_remote stop") + if rc != 0: + time.sleep(6) + else: + break + + def _start_pcmk_remote(self, node): + """ Start the Pacemaker Remote service on the given node """ + + for _ in range(10): + (rc, _) = self._rsh(node, "service pacemaker_remote start") + if rc != 0: + time.sleep(6) + else: + self._pcmk_started = True + break + + def _freeze_pcmk_remote(self, node): + """ Simulate a Pacemaker Remote daemon failure """ + + self._rsh(node, "killall -STOP pacemaker-remoted") + + def _resume_pcmk_remote(self, node): + """ Simulate the Pacemaker Remote daemon recovering """ + + self._rsh(node, "killall -CONT pacemaker-remoted") + + def _start_metal(self, node): + """ Setup a Pacemaker Remote 
configuration. Remove any existing + connection resources or nodes. Start the pacemaker_remote service. + Create a connection resource. + """ + + # Cluster nodes are reused as remote nodes in remote tests. If cluster + # services were enabled at boot, in case the remote node got fenced, the + # cluster node would join instead of the expected remote one. Meanwhile + # pacemaker_remote would not be able to start. Depending on the chances, + # the situations might not be able to be orchestrated gracefully any more. + # + # Temporarily disable any enabled cluster serivces. + self._disable_services(node) + + # make sure the resource doesn't already exist for some reason + self._rsh(node, "crm_resource -D -r %s -t primitive" % self._remote_rsc) + self._rsh(node, "crm_resource -D -r %s -t primitive" % self._remote_node) + + if not self._stop(node): + self.fail("Failed to shutdown cluster node %s" % node) + return + + self._start_pcmk_remote(node) + + if not self._pcmk_started: + self.fail("Failed to start pacemaker_remote on node %s" % node) + return + + # Convert node to baremetal now that it has shutdown the cluster stack + pats = [ self.templates["Pat:RscOpOK"] % ("start", self._remote_node), + self.templates["Pat:DC_IDLE"] ] + watch = self.create_watch(pats, 120) + watch.set_watch() + + self._add_connection_rsc(node) + + with Timer(self._logger, self.name, "remoteMetalInit"): + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns: %s" % watch.unmatched) + + def migrate_connection(self, node): + """ Move the remote connection resource from the node it's currently + running on to any other available node + """ + + if self.failed: + return + + pats = [ self.templates["Pat:RscOpOK"] % ("migrate_to", self._remote_node), + self.templates["Pat:RscOpOK"] % ("migrate_from", self._remote_node), + self.templates["Pat:DC_IDLE"] ] + + watch = self.create_watch(pats, 120) + watch.set_watch() + + (rc, _) = self._rsh(node, "crm_resource -M -r %s" % self._remote_node, verbose=1) + if rc != 0: + self.fail("failed to move remote node connection resource") + return + + with Timer(self._logger, self.name, "remoteMetalMigrate"): + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns: %s" % watch.unmatched) + + def fail_rsc(self, node): + """ Cause the dummy resource running on a Pacemaker Remote node to fail + and verify that the failure is logged correctly + """ + + if self.failed: + return + + watchpats = [ self.templates["Pat:RscRemoteOpOK"] % ("stop", self._remote_rsc, self._remote_node), + self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node), + self.templates["Pat:DC_IDLE"] ] + + watch = self.create_watch(watchpats, 120) + watch.set_watch() + + self.debug("causing dummy rsc to fail.") + + self._rsh(node, "rm -f /var/run/resource-agents/Dummy*") + + with Timer(self._logger, self.name, "remoteRscFail"): + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) + + def fail_connection(self, node): + """ Cause the remote connection resource to fail and verify that the + node is fenced and the connection resource is restarted on another + node. + """ + + if self.failed: + return + + watchpats = [ self.templates["Pat:Fencing_ok"] % self._remote_node, + self.templates["Pat:NodeFenced"] % self._remote_node ] + + watch = self.create_watch(watchpats, 120) + watch.set_watch() + + # freeze the pcmk remote daemon. 
this will result in fencing + self.debug("Force stopped active remote node") + self._freeze_pcmk_remote(node) + + self.debug("Waiting for remote node to be fenced.") + + with Timer(self._logger, self.name, "remoteMetalFence"): + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns: %s" % watch.unmatched) + return + + self.debug("Waiting for the remote node to come back up") + self._cm.ns.wait_for_node(node, 120) + + pats = [ self.templates["Pat:RscOpOK"] % ("start", self._remote_node) ] + + if self._remote_rsc_added: + pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node)) + + watch = self.create_watch([], 240) + watch.set_watch() + + # start the remote node again watch it integrate back into cluster. + self._start_pcmk_remote(node) + if not self._pcmk_started: + self.fail("Failed to start pacemaker_remote on node %s" % node) + return + + self.debug("Waiting for remote node to rejoin cluster after being fenced.") + + with Timer(self._logger, self.name, "remoteMetalRestart"): + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns: %s" % watch.unmatched) + + def _add_dummy_rsc(self, node): + """ Add a dummy resource that runs on the Pacemaker Remote node """ + + if self.failed: + return + + # verify we can put a resource on the remote node + pats = [ self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node), + self.templates["Pat:DC_IDLE"] ] + + watch = self.create_watch(pats, 120) + watch.set_watch() + + # Add a resource that must live on remote-node + self._add_primitive_rsc(node) + + # force that rsc to prefer the remote node. + (rc, _) = self._cm.rsh(node, "crm_resource -M -r %s -N %s -f" % (self._remote_rsc, self._remote_node), verbose=1) + if rc != 0: + self.fail("Failed to place remote resource on remote node.") + return + + with Timer(self._logger, self.name, "remoteMetalRsc"): + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns: %s" % watch.unmatched) + + def test_attributes(self, node): + """ Verify that attributes can be set on the Pacemaker Remote node """ + + if self.failed: + return + + # This verifies permanent attributes can be set on a remote-node. It also + # verifies the remote-node can edit its own cib node section remotely. + (rc, line) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % self._remote_node, verbose=1) + if rc != 0: + self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line)) + return + + (rc, _) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % self._remote_node, verbose=1) + if rc != 0: + self.fail("Failed to get remote-node attribute") + return + + (rc, _) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % self._remote_node, verbose=1) + if rc != 0: + self.fail("Failed to delete remote-node attribute") + + def cleanup_metal(self, node): + """ Clean up the Pacemaker Remote node configuration previously created by + _setup_metal. Stop and remove dummy resources and connection resources. + Stop the pacemaker_remote service. Remove the remote node itself. 
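
The methods above all follow the same watch discipline: build the expected log patterns, start the watch before triggering the action, then require every pattern to appear before the timeout. A self-contained mock of that control flow; the real LogWatcher needs a live cluster, so FakeWatch here is a stub that only mimics the interface:

    class FakeWatch:
        """ Stand-in for LogWatcher: records patterns, pretends they match """

        def __init__(self, patterns):
            self.unmatched = list(patterns)

        def set_watch(self):
            pass  # a real watcher starts tailing the logs here

        def look_for_all(self):
            self.unmatched = []  # a real watcher blocks until match or timeout
            return True

    pats = ["Pat:RscOpOK for start", "Pat:DC_IDLE"]  # illustrative patterns
    watch = FakeWatch(pats)
    watch.set_watch()       # start watching *before* the action
    # ... trigger the cluster action under test here ...
    watch.look_for_all()
    if watch.unmatched:
        print("Unmatched patterns: %s" % watch.unmatched)
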
+ """ + + self._enable_services(node) + + if not self._pcmk_started: + return + + pats = [ ] + + watch = self.create_watch(pats, 120) + watch.set_watch() + + if self._remote_rsc_added: + pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._remote_rsc)) + + if self._remote_node_added: + pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._remote_node)) + + with Timer(self._logger, self.name, "remoteMetalCleanup"): + self._resume_pcmk_remote(node) + + if self._remote_rsc_added: + # Remove dummy resource added for remote node tests + self.debug("Cleaning up dummy rsc put on remote node") + self._rsh(self._get_other_node(node), "crm_resource -U -r %s" % self._remote_rsc) + self._del_rsc(node, self._remote_rsc) + + if self._remote_node_added: + # Remove remote node's connection resource + self.debug("Cleaning up remote node connection resource") + self._rsh(self._get_other_node(node), "crm_resource -U -r %s" % self._remote_node) + self._del_rsc(node, self._remote_node) + + watch.look_for_all() + + if watch.unmatched: + self.fail("Unmatched patterns: %s" % watch.unmatched) + + self._stop_pcmk_remote(node) + + self.debug("Waiting for the cluster to recover") + self._cm.cluster_stable() + + if self._remote_node_added: + # Remove remote node itself + self.debug("Cleaning up node entry for remote node") + self._rsh(self._get_other_node(node), "crm_node --force --remove %s" % self._remote_node) + + def _setup_env(self, node): + """ Setup the environment to allow Pacemaker Remote to function. This + involves generating a key and copying it to all nodes in the cluster. + """ + + self._remote_node = "remote-%s" % node + + # we are assuming if all nodes have a key, that it is + # the right key... If any node doesn't have a remote + # key, we regenerate it everywhere. + if self._rsh.exists_on_all("/etc/pacemaker/authkey", self._env["nodes"]): + return + + # create key locally + (handle, keyfile) = tempfile.mkstemp(".cts") + os.close(handle) + subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"], + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + # sync key throughout the cluster + for n in self._env["nodes"]: + self._rsh(n, "mkdir -p --mode=0750 /etc/pacemaker") + self._rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % n) + self._rsh(n, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey") + self._rsh(n, "chmod 0640 /etc/pacemaker/authkey") + + os.unlink(keyfile) + + def is_applicable(self): + """ Return True if this test is applicable in the current test configuration. 
""" + + if not CTSTest.is_applicable(self): + return False + + for node in self._env["nodes"]: + (rc, _) = self._rsh(node, "which pacemaker-remoted >/dev/null 2>&1") + if rc != 0: + return False + + return True + + def start_new_test(self, node): + """ Prepare a remote test for running by setting up its environment + and resources + """ + + self.incr("calls") + self.reset() + + ret = self._startall(None) + if not ret: + return self.failure("setup failed: could not start all nodes") + + self._setup_env(node) + self._start_metal(node) + self._add_dummy_rsc(node) + return True + + def __call__(self, node): + """ Perform this test """ + + raise NotImplementedError + + @property + def errors_to_ignore(self): + """ Return list of errors which should be ignored """ + + return [ r"""is running on remote.*which isn't allowed""", + r"""Connection terminated""", + r"""Could not send remote""" ] diff --git a/python/pacemaker/_cts/tests/simulstartlite.py b/python/pacemaker/_cts/tests/simulstartlite.py new file mode 100644 index 0000000000..66f4d5eaf1 --- /dev/null +++ b/python/pacemaker/_cts/tests/simulstartlite.py @@ -0,0 +1,131 @@ +""" Simultaneously start stopped nodes """ + +__all__ = ["SimulStartLite"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +from pacemaker._cts.tests.ctstest import CTSTest + +# Disable various pylint warnings that occur in so many places throughout this +# file it's easiest to just take care of them globally. This does introduce the +# possibility that we'll miss some other cause of the same warning, but we'll +# just have to be careful. + +# pylint doesn't understand that self._env is subscriptable. +# pylint: disable=unsubscriptable-object + + +class SimulStartLite(CTSTest): + """ A pseudo-test that is only used to set up conditions before running + some other test. This class starts any stopped nodes more or less + simultaneously. + + Other test classes should not use this one as a superclass. + """ + + def __init__(self, cm): + """ Create a new SimulStartLite instance + + Arguments: + + cm -- A ClusterManager instance + """ + + CTSTest.__init__(self,cm) + self.name = "SimulStartLite" + + def __call__(self, dummy): + """ Start all stopped nodes more or less simultaneously, returning + whether this succeeded or not. + """ + + self.incr("calls") + self.debug("Setup: %s" % self.name) + + # We ignore the "node" parameter... + node_list = [] + for node in self._env["nodes"]: + if self._cm.ShouldBeStatus[node] == "down": + self.incr("WasStopped") + node_list.append(node) + + self.set_timer() + while len(node_list) > 0: + # Repeat until all nodes come up + uppat = self.templates["Pat:NonDC_started"] + if self._cm.upcount() == 0: + uppat = self.templates["Pat:Local_started"] + + watchpats = [ self.templates["Pat:DC_IDLE"] ] + for node in node_list: + watchpats.extend([uppat % node, + self.templates["Pat:InfraUp"] % node, + self.templates["Pat:PacemakerUp"] % node]) + + # Start all the nodes - at about the same time... 
+ watch = self.create_watch(watchpats, self._env["DeadTime"]+10) + watch.set_watch() + + stonith = self._cm.prepare_fencing_watcher(self.name) + + for node in node_list: + self._cm.StartaCMnoBlock(node) + + watch.look_for_all() + + node_list = self._cm.fencing_cleanup(self.name, stonith) + + if node_list is None: + return self.failure("Cluster did not stabilize") + + # Remove node_list messages from watch.unmatched + for node in node_list: + self._logger.debug("Dealing with stonith operations for %s" % node_list) + if watch.unmatched: + try: + watch.unmatched.remove(uppat % node) + except ValueError: + self.debug("Already matched: %s" % (uppat % node)) + + try: + watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) + except ValueError: + self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) + + try: + watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) + except ValueError: + self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) + + if watch.unmatched: + for regex in watch.unmatched: + self._logger.log ("Warn: Startup pattern not found: %s" % regex) + + if not self._cm.cluster_stable(): + return self.failure("Cluster did not stabilize") + + did_fail = False + unstable = [] + for node in self._env["nodes"]: + if self._cm.StataCM(node) == 0: + did_fail = True + unstable.append(node) + + if did_fail: + return self.failure("Unstarted nodes exist: %s" % unstable) + + unstable = [] + for node in self._env["nodes"]: + if not self._cm.node_stable(node): + did_fail = True + unstable.append(node) + + if did_fail: + return self.failure("Unstable cluster nodes exist: %s" % unstable) + + return self.success() + + def is_applicable(self): + """ SimulStartLite is a setup test and never applicable """ + + return False diff --git a/python/pacemaker/_cts/tests/simulstoplite.py b/python/pacemaker/_cts/tests/simulstoplite.py new file mode 100644 index 0000000000..a5b965e798 --- /dev/null +++ b/python/pacemaker/_cts/tests/simulstoplite.py @@ -0,0 +1,91 @@ +""" Simultaneously stop running nodes """ + +__all__ = ["SimulStopLite"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +from pacemaker._cts.tests.ctstest import CTSTest + +# Disable various pylint warnings that occur in so many places throughout this +# file it's easiest to just take care of them globally. This does introduce the +# possibility that we'll miss some other cause of the same warning, but we'll +# just have to be careful. + +# pylint doesn't understand that self._rsh is callable. +# pylint: disable=not-callable +# pylint doesn't understand that self._env is subscriptable. +# pylint: disable=unsubscriptable-object + + +class SimulStopLite(CTSTest): + """ A pseudo-test that is only used to set up conditions before running + some other test. This class stops any running nodes more or less + simultaneously. It can be used both to set up a test or to clean up + a test. + + Other test classes should not use this one as a superclass. + """ + + def __init__(self, cm): + """ Create a new SimulStopLite instance + + Arguments: + + cm -- A ClusterManager instance + """ + + CTSTest.__init__(self,cm) + self.name = "SimulStopLite" + + def __call__(self, dummy): + """ Stop all running nodes more or less simultaneously, returning + whether this succeeded or not. + """ + + self.incr("calls") + self.debug("Setup: %s" % self.name) + + # We ignore the "node" parameter... 
+ watchpats = [] + + for node in self._env["nodes"]: + if self._cm.ShouldBeStatus[node] == "up": + self.incr("WasStarted") + watchpats.append(self.templates["Pat:We_stopped"] % node) + + if len(watchpats) == 0: + return self.success() + + # Stop all the nodes - at about the same time... + watch = self.create_watch(watchpats, self._env["DeadTime"]+10) + + watch.set_watch() + self.set_timer() + for node in self._env["nodes"]: + if self._cm.ShouldBeStatus[node] == "up": + self._cm.StopaCMnoBlock(node) + + if watch.look_for_all(): + # Make sure they're completely down with no residue + for node in self._env["nodes"]: + self._rsh(node, self.templates["StopCmd"]) + + return self.success() + + did_fail = False + up_nodes = [] + for node in self._env["nodes"]: + if self._cm.StataCM(node) == 1: + did_fail = True + up_nodes.append(node) + + if did_fail: + return self.failure("Active nodes exist: %s" % up_nodes) + + self._logger.log("Warn: All nodes stopped but CTS didn't detect: %s" % watch.unmatched) + return self.failure("Missing log message: %s " % watch.unmatched) + + def is_applicable(self): + """ SimulStopLite is a setup test and never applicable """ + + return False diff --git a/python/pacemaker/_cts/tests/starttest.py b/python/pacemaker/_cts/tests/starttest.py new file mode 100644 index 0000000000..aba2899fd2 --- /dev/null +++ b/python/pacemaker/_cts/tests/starttest.py @@ -0,0 +1,54 @@ +""" Start the cluster manager on a given node """ + +__all__ = ["StartTest"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +from pacemaker._cts.tests.ctstest import CTSTest + +# Disable various pylint warnings that occur in so many places throughout this +# file it's easiest to just take care of them globally. This does introduce the +# possibility that we'll miss some other cause of the same warning, but we'll +# just have to be careful. + +# pylint doesn't understand that self._env is subscriptable. +# pylint: disable=unsubscriptable-object + + +class StartTest(CTSTest): + """ A pseudo-test that is only used to set up conditions before running + some other test. This class starts the cluster manager on a given + node. + + Other test classes should not use this one as a superclass. 
+ """ + + def __init__(self, cm): + """ Create a new StartTest instance + + Arguments: + + cm -- A ClusterManager instance + """ + + CTSTest.__init__(self,cm) + self.name = "Start" + + def __call__(self, node): + """ Start the given node, returning whether this succeeded or not """ + + self.incr("calls") + + if self._cm.upcount() == 0: + self.incr("us") + else: + self.incr("them") + + if self._cm.ShouldBeStatus[node] != "down": + return self.skipped() + + if self._cm.StartaCM(node): + return self.success() + + return self.failure("Startup %s on node %s failed" + % (self._env["Name"], node)) diff --git a/python/pacemaker/_cts/tests/stoptest.py b/python/pacemaker/_cts/tests/stoptest.py new file mode 100644 index 0000000000..a068b4d828 --- /dev/null +++ b/python/pacemaker/_cts/tests/stoptest.py @@ -0,0 +1,97 @@ +""" Stop the cluster manager on a given node """ + +__all__ = ["StopTest"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +from pacemaker._cts.tests.ctstest import CTSTest + +# Disable various pylint warnings that occur in so many places throughout this +# file it's easiest to just take care of them globally. This does introduce the +# possibility that we'll miss some other cause of the same warning, but we'll +# just have to be careful. + +# pylint doesn't understand that self._rsh is callable. +# pylint: disable=not-callable +# pylint doesn't understand that self._env is subscriptable. +# pylint: disable=unsubscriptable-object + + +class StopTest(CTSTest): + """ A pseudo-test that is only used to set up conditions before running + some other test. This class stops the cluster manager on a given + node. + + Other test classes should not use this one as a superclass. 
+ """ + + def __init__(self, cm): + """ Create a new StopTest instance + + Arguments: + + cm -- A ClusterManager instance + """ + + CTSTest.__init__(self, cm) + self.name = "Stop" + + def __call__(self, node): + """ Stop the given node, returning whether this succeeded or not """ + + self.incr("calls") + if self._cm.ShouldBeStatus[node] != "up": + return self.skipped() + + # Technically we should always be able to notice ourselves stopping + patterns = [ self.templates["Pat:We_stopped"] % node ] + + # Any active node needs to notice this one left + # (note that this won't work if we have multiple partitions) + for other in self._env["nodes"]: + if self._cm.ShouldBeStatus[other] == "up" and other != node: + patterns.append(self.templates["Pat:They_stopped"] %(other, self._cm.key_for_node(node))) + + watch = self.create_watch(patterns, self._env["DeadTime"]) + watch.set_watch() + + if node == self._cm.OurNode: + self.incr("us") + else: + if self._cm.upcount() <= 1: + self.incr("all") + else: + self.incr("them") + + self._cm.StopaCM(node) + watch.look_for_all() + + failreason = None + unmatched_str = "||" + + if watch.unmatched: + (_, output) = self._rsh(node, "/bin/ps axf", verbose=1) + for line in output: + self.debug(line) + + (_, output) = self._rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1) + for line in output: + self.debug(line) + + for regex in watch.unmatched: + self._logger.log ("ERROR: Shutdown pattern not found: %s" % regex) + unmatched_str += "%s||" % regex + failreason = "Missing shutdown pattern" + + self._cm.cluster_stable(self._env["DeadTime"]) + + if not watch.unmatched or self._cm.upcount() == 0: + return self.success() + + if len(watch.unmatched) >= self._cm.upcount(): + return self.failure("no match against (%s)" % unmatched_str) + + if failreason is None: + return self.success() + + return self.failure(failreason) diff --git a/python/pacemaker/_cts/timer.py b/python/pacemaker/_cts/timer.py new file mode 100644 index 0000000000..122b70b711 --- /dev/null +++ b/python/pacemaker/_cts/timer.py @@ -0,0 +1,63 @@ +""" Timer-related utilities for CTS """ + +__all__ = ["Timer"] +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" +__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + +import time + +class Timer: + """ A class for measuring the runtime of some task. A Timer may be used + manually or as a context manager, like so: + + with Timer(logger, "SomeTest", "SomeTimer"): + ... + + A Timer runs from when start() is called until the timer is deleted + or reset() is called. There is no explicit stop method. + """ + + def __init__(self, logger, test_name, timer_name): + """ Create a new Timer instance. + + Arguments: + + logger -- A Logger instance that can be used to record when + the timer stopped + test_name -- The name of the test this timer is being run for + timer_name -- The name of this timer + """ + + self._logger = logger + self._start_time = None + self._test_name = test_name + self._timer_name = timer_name + + def __enter__(self): + self.start() + return self + + def __exit__(self, *args): + self._logger.debug("%s:%s runtime: %.2f" % (self._test_name, self._timer_name, self.elapsed)) + + def reset(self): + """ Restart the timer """ + + self.start() + + def start(self): + """ Start the timer """ + + self._start_time = time.time() + + @property + def start_time(self): + """ When did the timer start? 
""" + + return self._start_time + + @property + def elapsed(self): + """ How long has the timer been running for? """ + + return time.time() - self._start_time diff --git a/python/pylintrc b/python/pylintrc index e65110b601..f46eeceead 100644 --- a/python/pylintrc +++ b/python/pylintrc @@ -1,556 +1,557 @@ # NOTE: Any line with CHANGED: describes something that we changed from the # default pylintrc configuration. [MAIN] # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Files or directories to be skipped. They should be base names, not # paths. ignore=CVS # Add files or directories matching the regex patterns to the ignore-list. The # regex matches against paths and can be in Posix or Windows format. ignore-paths= # Files or directories matching the regex patterns are skipped. The regex # matches against base names, not paths. ignore-patterns=^\.# # Pickle collected data for later comparisons. persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the # number of processors available to use. jobs=1 # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code extension-pkg-allow-list= # Minimum supported python version # CHANGED py-version = 3.4 # Control the amount of potential inferred values when inferring a single # object. This can help the performance when dealing with large functions or # complex, nested conditions. limit-inference-results=100 # Specify a score threshold under which the program will exit with error. fail-under=10.0 # Return non-zero exit code if any of these messages/categories are detected, # even if score is above --fail-under value. Syntax same as enable. Messages # specified are enabled, while categories only check already-enabled messages. fail-on= # Clear in-memory caches upon conclusion of linting. Useful if running pylint in # a server-like mode. clear-cache-post-run=no [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED # confidence= # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. enable= use-symbolic-message-instead, useless-suppression, # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once).You can also use "--disable=all" to # disable everything first and then re-enable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". 
If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W" # CHANGED disable=line-too-long, too-few-public-methods, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-statements, unrecognized-option, useless-option-value [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html. You can also give a reporter class, eg # mypackage.mymodule.MyReporterClass. output-format=text # Tells whether to display a full report or only the messages reports=no # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables 'fatal', 'error', 'warning', 'refactor', 'convention' # and 'info', which contain the number of messages in each category, as # well as 'statement', which is the total number of statements analyzed. This # score is used by the global evaluation report (RP0004). evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details #msg-template= # Activate the evaluation score. score=yes [LOGGING] # Logging modules to check that the string format arguments are in logging # function parameter format logging-modules=logging # The type of string formatting that logging methods do. `old` means using % # formatting, `new` is for `{}` formatting. logging-format-style=old [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. # CHANGED: Don't do anything about FIXME, XXX, or TODO notes= # Regular expression of note tags to take in consideration. #notes-rgx= [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=6 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # Ignore imports when computing similarities. ignore-imports=yes # Signatures are removed from the similarity computation ignore-signatures=yes [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # List of additional names supposed to be defined in builtins. Remember that # you should avoid defining new builtins when possible. additional-builtins= # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_,_cb # Tells whether unused global variables should be treated as a violation. allow-global-unused-variables=yes # List of names allowed to shadow builtins allowed-redefined-builtins= # List of qualified module names which can have objects that can redefine # builtins. redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io [FORMAT] # Maximum number of characters on a single line. max-line-length=100 # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )?<?https?://\S+>?$ # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no # Allow the body of a class to be on the same line as the declaration if body # contains single statement. single-line-class-stmt=no # Maximum number of lines in a module max-module-lines=2000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). 

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=

# Activate the evaluation score.
score=yes


[LOGGING]

# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging

# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old


[MISCELLANEOUS]

# List of note tags to take into consideration, separated by a comma.
# CHANGED: Don't do anything about FIXME, XXX, or TODO
notes=

# Regular expression of note tags to take into consideration.
#notes-rgx=


[SIMILARITIES]

# Minimum number of lines for a similarity.
min-similarity-lines=6

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=yes

# Signatures are removed from the similarity computation
ignore-signatures=yes


[VARIABLES]

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of names allowed to shadow builtins
allowed-redefined-builtins=

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io


[FORMAT]

# Maximum number of characters on a single line.
max-line-length=100

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Maximum number of lines in a module
max-module-lines=2000

# String used as indentation unit. This is usually "    " (4 spaces) or "\t"
# (1 tab).
indent-string='    '

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=


[BASIC]

# Good variable names which should always be accepted, separated by a comma
# CHANGED: Single variable names are handled by variable-rgx below, leaving
# _ here as the name for any variable that should be ignored.
good-names=_

# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=

# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata

# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Include a hint for the correct naming format with invalid-name
include-naming-hint=no

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names
# CHANGED: One letter variables are fine
variable-rgx=[a-z_][a-z0-9_]{,30}$
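
# Illustrative matches for the relaxed regex above (hypothetical names): "i",
# "x2" and "node_name" are accepted, while "N" and "nodeName" are rejected
# because they contain upper-case characters.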

# Naming style matching correct constant names.
const-naming-style=UPPER_CASE

# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$

# Naming style matching correct attribute names.
attr-naming-style=snake_case

# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,}$

# Naming style matching correct argument names.
argument-naming-style=snake_case

# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$

# Naming style matching correct class attribute names.
class-attribute-naming-style=any

# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$

# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE

# Regular expression matching correct class constant names. Overrides class-
# const-naming-style.
#class-const-rgx=

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,}$

# Regular expression matching correct type variable names
#typevar-rgx=

# Regular expression which should only match function or class names that do
# not require a docstring. Use ^(?!__init__$)_ to also check __init__.
no-docstring-rgx=__.*__

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# List of decorators that define properties, such as abc.abstractproperty.
property-classes=abc.abstractproperty


[TYPECHECK]

# Regex pattern to define which classes are considered mixins if ignore-mixin-
# members is set to 'yes'
mixin-class-rgx=.*MixIn

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent,argparse.Namespace

# List of decorators that create context managers from functions, such as
# contextlib.contextmanager.
contextmanager-decorators=contextlib.contextmanager
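
# Illustrative sketch (ours, not an upstream default): listing
# contextlib.contextmanager above lets pylint infer that a decorated
# generator function returns a context manager, so using it in a "with"
# statement does not trigger not-context-manager (E1129):
#     @contextlib.contextmanager
#     def opened(path):
#         f = open(path)
#         try:
#             yield f
#         finally:
#             f.close()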

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken into consideration
# when showing a hint for a missing member.
missing-member-max-choices=1


[SPELLING]

# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:,pragma:,# noinspection

# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=.pyenchant_pylint_custom_dict.txt

# Tells whether to store unknown words to the indicated private dictionary in
# the --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no

# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=2


[DESIGN]

# Maximum number of arguments for function / method
max-args=10

# Maximum number of locals for function / method body
max-locals=25

# Maximum number of returns / yields for function / method body
max-returns=11

# Maximum number of branches for function / method body
max-branches=27

# Maximum number of statements in function / method body
max-statements=100

# Maximum number of parents for a class (see R0901).
max-parents=7

# List of qualified class names to ignore when counting class parents (see
# R0901).
ignored-parents=

# Maximum number of attributes for a class (see R0902).
max-attributes=11

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

# Maximum number of public methods for a class (see R0904).
max-public-methods=25

# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5

# Maximum number of statements in a try-block
max-try-statements = 14

# List of regular expressions of class ancestor names to
# ignore when counting public methods (see R0903).
exclude-too-few-public-methods=


[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp,__post_init__
+# CHANGED: Remove setUp and __post_init__, add reset
+defining-attr-methods=__init__,__new__,reset
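
# Illustrative sketch of the effect of adding "reset" above (hypothetical
# class, not from this repository): attributes first assigned in reset() no
# longer trigger attribute-defined-outside-init (W0201) when reset() is where
# instance attributes are declared:
#     class Timer:
#         def __init__(self):
#             self.reset()
#
#         def reset(self):
#             self._start_time = None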

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make

# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no


[IMPORTS]

# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no

# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec

# Create a graph of all (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

# Pairs of modules and their preferred modules, separated by a comma.
preferred-modules=


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=builtins.Exception


[TYPING]

# Set to ``no`` if the app / library does **NOT** need to support runtime
# introspection of type annotations. If you use type annotations
# **exclusively** for type checking of an application, you're probably fine.
# For libraries, evaluate first whether some users want to access the type
# hints at runtime, e.g., through ``typing.get_type_hints``. Applies to Python
# versions 3.7 - 3.9
runtime-typing = no


[DEPRECATED_BUILTINS]

# List of builtin function names that should not be used, separated by a comma
bad-functions=map,input


[REFACTORING]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5

# Complete names of functions that never return. When checking for
# inconsistent-return-statements, if a never-returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error


[STRING]

# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no

# This flag controls whether implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no


[CODE_STYLE]

# Max line length for which to still emit suggestions. Used to prevent optional
# suggestions which would get split by a code formatter (e.g., black). Will
# default to the setting for ``max-line-length``.
#max-line-length-suggestions=
diff --git a/python/setup.py.in b/python/setup.py.in
index c4083da3d7..e9d61d0a68 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -1,20 +1,20 @@
#!@PYTHON@

import re

from setuptools import setup

# This will match things like "2.1.3" and "2.1.3-100", but not things like
# "2.1.3-100.deadbeef". Any other formats (or lack of a match) will result
# in an exception during package building, which is probably okay. That's an
# error on our part and is something we should fix.
ver = re.match("[0-9.]+[0-9-]*", "@PACKAGE_VERSION@")[0]

setup(name='pacemaker',
      version=ver,
      author='The Pacemaker project contributors',
      author_email='@PACKAGE_BUGREPORT@',
      license='LGPLv2.1+',
      url='https://clusterlabs.org/pacemaker/',
      description='Python libraries for Pacemaker',
-      packages=['pacemaker', 'pacemaker._cts'],
+      packages=['pacemaker', 'pacemaker._cts', 'pacemaker._cts.tests'],
      )
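
# Illustrative behaviour of the version regex above (hypothetical inputs, not
# part of the build):
#     re.match("[0-9.]+[0-9-]*", "2.1.3")[0]      # -> "2.1.3"
#     re.match("[0-9.]+[0-9-]*", "2.1.3-100")[0]  # -> "2.1.3-100"
#     re.match("[0-9.]+[0-9-]*", "deadbeef")      # -> None, so the [0]
#                                                 #    subscript raises
#                                                 #    TypeError at build time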