
diff --git a/configure.ac b/configure.ac
index 0fbd49702f..ab30b1bf46 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,2154 +1,2159 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl Copyright 2009-2023 the Pacemaker project contributors
dnl
dnl The version control history for this file may have further details.
dnl
dnl This source code is licensed under the GNU General Public License version 2
dnl or later (GPLv2+) WITHOUT ANY WARRANTY.
dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.64)
dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08).
dnl Once we can require that version, we can simplify this, and no longer
dnl need ACLOCAL_AMFLAGS in Makefile.am.
m4_ifdef([AC_CONFIG_MACRO_DIRS],
[AC_CONFIG_MACRO_DIRS([m4])],
[AC_CONFIG_MACRO_DIR([m4])])
AC_DEFUN([AC_DATAROOTDIR_CHECKED])
dnl Suggested structure:
dnl information on the package
dnl checks for programs
dnl checks for libraries
dnl checks for header files
dnl checks for types
dnl checks for structures
dnl checks for compiler characteristics
dnl checks for library functions
dnl checks for system services
m4_include([m4/version.m4])
AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker],
PCMK_URL)
PCMK_FEATURES=""
LT_CONFIG_LTDL_DIR([libltdl])
AC_CONFIG_AUX_DIR([libltdl/config])
AC_CANONICAL_HOST
dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go
dnl
dnl Internal header: include/config.h
dnl - Contains ALL defines
dnl - include/config.h.in is generated automatically by autoheader
dnl - NOT to be included in any header files except crm_internal.h
dnl (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl - Contains a subset of defines checked here
dnl - Manually edit include/crm_config.h.in to have configure include
dnl new defines
dnl - Should not include HAVE_* defines
dnl - Safe to include anywhere
AC_CONFIG_HEADERS([include/config.h include/crm_config.h])
dnl 1.13: minimum automake version required
dnl foreign: don't require GNU-standard top-level files
dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7
dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+)
AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects])
dnl Require minimum version of pkg-config
PKG_PROG_PKG_CONFIG(0.27)
AS_IF([test x"${PKG_CONFIG}" != x""], [],
[AC_MSG_FAILURE([Could not find required build tool pkg-config (0.27 or later)])])
PKG_INSTALLDIR
PKG_NOARCH_INSTALLDIR
dnl Example 2.4. Silent Custom Rule to Generate a File
dnl %-bar.pc: %.pc
dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@
CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE
LDD=ldd
dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================
dnl A particular compiler can be forced by setting the CC environment variable
AC_PROG_CC
dnl Use at least C99 if possible (automatic for autoconf >= 2.70)
m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC])
dnl C++ is not needed for build, just maintainer utilities
AC_PROG_CXX
dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs:
dnl "The macro gl_EARLY must be called as soon as possible after verifying that
dnl the C compiler is working. ... The core part of the gnulib checks are done
dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL
dnl as a dependency.
gl_EARLY
gl_SET_CRYPTO_CHECK_DEFAULT([no])
gl_INIT
# --enable-new-dtags: Use RUNPATH instead of RPATH.
# It is necessary to have this done before libtool does linker detection.
# See also: https://github.com/kronosnet/kronosnet/issues/107
AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags],
[AM_LDFLAGS=-Wl,--enable-new-dtags],
[AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])])
AC_SUBST([AM_LDFLAGS])
saved_LDFLAGS="$LDFLAGS"
LDFLAGS="$AM_LDFLAGS $LDFLAGS"
LT_INIT([dlopen])
LDFLAGS="$saved_LDFLAGS"
LTDL_INIT([convenience])
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(char)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
dnl ===============================================
dnl Helpers
dnl ===============================================
cc_supports_flag() {
local CFLAGS="-Werror $@"
AC_MSG_CHECKING([whether $CC supports $@])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
[RC=0; AC_MSG_RESULT([yes])],
[RC=1; AC_MSG_RESULT([no])])
return $RC
}
# Some tests need to use their own CFLAGS
cc_temp_flags() {
ac_save_CFLAGS="$CFLAGS"
CFLAGS="$*"
}
cc_restore_flags() {
CFLAGS=$ac_save_CFLAGS
}
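dnl Usage sketch for the helpers above (the pattern used later in this file):
dnl   cc_temp_flags "$CFLAGS $WERROR"     # swap in test-specific CFLAGS
dnl   ... AC_COMPILE_IFELSE / header checks ...
dnl   cc_restore_flags                    # put the original CFLAGS back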
# expand_path_option $path_variable_name $default
expand_path_option() {
# The first argument is the variable *name* (not value)
ac_path_varname="$1"
# Get the original value of the variable
ac_path_value=$(eval echo "\${${ac_path_varname}}")
# Expand any literal variable expressions in the value so that we don't
# end up with something like '${prefix}' in #defines etc.
#
# Autoconf deliberately leaves values unexpanded to allow overriding
# the configure script choices in make commands (for example,
# "make exec_prefix=/foo install"). No longer being able to do this seems
# like no great loss.
eval ac_path_value=$(eval echo "${ac_path_value}")
# Use (expanded) default if necessary
AS_IF([test x"${ac_path_value}" = x""],
[eval ac_path_value=$(eval echo "$2")])
# Require a full path
AS_CASE(["$ac_path_value"],
[/*], [eval ${ac_path_varname}="$ac_path_value"],
[*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])]
)
}
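dnl Worked example: with prefix=/usr and sysconfdir left at its autoconf
dnl default of '${prefix}/etc', "expand_path_option sysconfdir" rewrites
dnl sysconfdir to /usr/etc. A hypothetical "expand_path_option FOO /some/dir"
dnl would set an empty FOO to /some/dir, and any result without a leading /
dnl aborts configure.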
# yes_no_try $user_response $default
DISABLED=0
REQUIRED=1
OPTIONAL=2
yes_no_try() {
local value
AS_IF([test x"$1" = x""], [value="$2"], [value="$1"])
AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"],
[0|no|false|disable], [return $DISABLED],
[1|yes|true|enable], [return $REQUIRED],
[try|check], [return $OPTIONAL]
)
AC_MSG_ERROR([Invalid option value "$value"])
}
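dnl Usage example (the pattern used for the options below):
dnl   yes_no_try "$enable_systemd" "try"   # unset input falls back to "try"
dnl   enable_systemd=$?                    # 0=DISABLED, 1=REQUIRED, 2=OPTIONAL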
check_systemdsystemunitdir() {
AC_MSG_CHECKING([which system unit file directory to use])
PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir])
AC_MSG_RESULT([${systemdsystemunitdir}])
test x"$systemdsystemunitdir" != x""
return $?
}
#
# Fix the defaults of certain built-in variables so they can be used in our
# custom argument defaults
#
AC_MSG_NOTICE([Sanitizing prefix: ${prefix}])
AS_IF([test x"$prefix" = x"NONE"],
[
prefix=/usr
dnl Fix default variables - "prefix" variable if not specified
AS_IF([test x"$localstatedir" = x"\${prefix}/var"],
[localstatedir="/var"])
AS_IF([test x"$sysconfdir" = x"\${prefix}/etc"],
[sysconfdir="/etc"])
])
AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}])
AS_CASE([$exec_prefix],
[prefix|NONE], [exec_prefix=$prefix])
AC_MSG_NOTICE([Sanitizing libdir: ${libdir}])
AS_CASE([$libdir],
[prefix|NONE], [
AC_MSG_CHECKING([which lib directory to use])
for aDir in lib64 lib
do
trydir="${exec_prefix}/${aDir}"
AS_IF([test -d ${trydir}],
[
libdir=${trydir}
break
])
done
AC_MSG_RESULT([$libdir])
])
dnl ===============================================
dnl Configure Options
dnl ===============================================
dnl Actual library checks come later, but pkg-config can be used here to grab
dnl external values to use as defaults for configure options
dnl Per the autoconf docs, --enable-*/--disable-* options should control
dnl features inherent to Pacemaker, while --with-*/--without-* options should
dnl control the use of external software. However, --enable-*/--disable-* may
dnl implicitly require additional external dependencies, and
dnl --with-*/--without-* may implicitly enable or disable features, so the
dnl line is blurry.
dnl
dnl We also use --with-* options for custom file, directory, and path
dnl locations, since autoconf does not provide an option type for those.
dnl --enable-* options: build process
AC_ARG_ENABLE([quiet],
[AS_HELP_STRING([--enable-quiet],
[suppress make output unless there is an error @<:@no@:>@])]
)
yes_no_try "$enable_quiet" "no"
enable_quiet=$?
AC_ARG_ENABLE([fatal-warnings],
[AS_HELP_STRING([--enable-fatal-warnings],
[enable pedantic and fatal warnings for gcc @<:@try@:>@])],
)
yes_no_try "$enable_fatal_warnings" "try"
enable_fatal_warnings=$?
AC_ARG_ENABLE([hardening],
[AS_HELP_STRING([--enable-hardening],
[harden the resulting executables/libraries @<:@try@:>@])]
)
yes_no_try "$enable_hardening" "try"
enable_hardening=$?
dnl --enable-* options: features
AC_ARG_ENABLE([systemd],
[AS_HELP_STRING([--enable-systemd],
[enable support for managing resources via systemd @<:@try@:>@])]
)
yes_no_try "$enable_systemd" "try"
enable_systemd=$?
AC_ARG_ENABLE([upstart],
[AS_HELP_STRING([--enable-upstart],
[enable support for managing resources via Upstart (deprecated) @<:@try@:>@])]
)
yes_no_try "$enable_upstart" "try"
enable_upstart=$?
dnl --enable-* options: features inherent to Pacemaker
AC_ARG_ENABLE([compat-2.0],
[AS_HELP_STRING([--enable-compat-2.0], m4_normalize([
preserve certain output as it was in 2.0; this option will be
available only for the lifetime of the 2.1 series @<:@no@:>@]))]
)
yes_no_try "$enable_compat_2_0" "no"
enable_compat_2_0=$?
AS_IF([test $enable_compat_2_0 -ne $DISABLED],
[
AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1],
[Keep certain output compatible with 2.0 release series])
PCMK_FEATURES="$PCMK_FEATURES compat-2.0"
]
)
# Add an option to create symlinks at the pre-2.0.0 daemon name locations, so
# that users and tools can continue to invoke those names directly (e.g., for
# meta-data). This option will be removed in a future release.
AC_ARG_ENABLE([legacy-links],
[AS_HELP_STRING([--enable-legacy-links],
[add symlinks for old daemon names (deprecated) @<:@no@:>@])]
)
yes_no_try "$enable_legacy_links" "no"
enable_legacy_links=$?
AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED])
# AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults
# to enabled. We override the definition of AM_NLS to flip the default and mark
# it as experimental in the help text.
AC_DEFUN([AM_NLS],
[AC_MSG_CHECKING([whether NLS is requested])
AC_ARG_ENABLE([nls],
[AS_HELP_STRING([--enable-nls],
[use Native Language Support (experimental)])],
USE_NLS=$enableval, USE_NLS=no)
AC_MSG_RESULT([$USE_NLS])
AC_SUBST([USE_NLS])]
)
AM_GNU_GETTEXT([external])
AM_GNU_GETTEXT_VERSION([0.18])
AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"])
dnl --with-* options: external software support, and custom locations
dnl This argument is defined via an M4 macro so default can be a variable
AC_DEFUN([VERSION_ARG],
[AC_ARG_WITH([version],
[AS_HELP_STRING([--with-version=VERSION],
[override package version @<:@$1@:>@])],
[ PACEMAKER_VERSION="$withval" ],
[ PACEMAKER_VERSION="$PACKAGE_VERSION" ])]
)
VERSION_ARG(VERSION_NUMBER)
# Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case
# the user used --with-version. Unfortunately, this can only affect the
# substitution variables and later uses in this file, not the config.h
# constants, so we have to be careful to use only PACEMAKER_VERSION in C code.
PACKAGE_VERSION=$PACEMAKER_VERSION
VERSION=$PACEMAKER_VERSION
# Detect highest API schema version (use git if available to list managed RNGs,
# in case there are leftover schema files from an earlier build of a different
# version, otherwise check all RNGs)
API_VERSION=$({ git ls-files xml/api/*.rng 2>/dev/null || ls -1 xml/api/*.rng ; } dnl
| sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' | sort -V | tail -1)
AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"],
[Highest API schema version])
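dnl Worked example for the pipeline above (hypothetical file names): given
dnl xml/api/crm_mon-2.21.rng and xml/api/crm_resource-2.22.rng, the sed
dnl extracts "2.21" and "2.22", sort -V orders them as versions, and tail -1
dnl leaves API_VERSION=2.22.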
+# Detect highest CIB schema version
+CIB_VERSION=$({ git ls-files xml/*.rng 2>/dev/null || ls -1 xml/*.rng ; } dnl
+ | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' | sort -V | tail -1)
+AC_SUBST(CIB_VERSION)
+
# Re-run configure at next make if any RNG changes, to re-detect highest
AC_SUBST([CONFIG_STATUS_DEPENDENCIES],
[$(echo '$(wildcard $(top_srcdir)/xml/api/*.rng)')])
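dnl The $(echo '...') above is a quoting trick: the make $(wildcard ...) call
dnl passes through literally, so the generated Makefile should contain a line
dnl like:
dnl   CONFIG_STATUS_DEPENDENCIES = $(wildcard $(top_srcdir)/xml/api/*.rng)
dnl which make expands itself, re-running config.status --recheck when any
dnl matching RNG file is newer.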
CRM_DAEMON_USER=""
AC_ARG_WITH([daemon-user],
[AS_HELP_STRING([--with-daemon-user=USER],
[user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])],
[ CRM_DAEMON_USER="$withval" ]
)
CRM_DAEMON_GROUP=""
AC_ARG_WITH([daemon-group],
[AS_HELP_STRING([--with-daemon-group=GROUP],
[group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])],
[ CRM_DAEMON_GROUP="$withval" ]
)
BUG_URL=""
AC_ARG_WITH([bug-url],
[AS_HELP_STRING([--with-bug-url=DIR], m4_normalize([
address where users should submit bug reports
@<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))],
[ BUG_URL="$withval" ]
)
dnl --with-* options: features
AC_ARG_WITH([cibsecrets],
[AS_HELP_STRING([--with-cibsecrets],
[support separate file for CIB secrets @<:@no@:>@])]
)
yes_no_try "$with_cibsecrets" "no"
with_cibsecrets=$?
AC_ARG_WITH([gnutls],
[AS_HELP_STRING([--with-gnutls],
[support Pacemaker Remote and remote-tls-port using GnuTLS @<:@try@:>@])]
)
yes_no_try "$with_gnutls" "try"
with_gnutls=$?
PCMK_GNUTLS_PRIORITIES="NORMAL"
AC_ARG_WITH([gnutls-priorities],
[AS_HELP_STRING([--with-gnutls-priorities],
[default GnuTLS cipher priorities @<:@NORMAL@:>@])],
[ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ]
)
AC_ARG_WITH([concurrent-fencing-default],
[AS_HELP_STRING([--with-concurrent-fencing-default],
[default value for concurrent-fencing cluster option @<:@false@:>@])],
)
AS_CASE([$with_concurrent_fencing_default],
[""], [with_concurrent_fencing_default="false"],
[false], [],
[true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"],
[AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])]
)
AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT],
["$with_concurrent_fencing_default"],
[Default value for concurrent-fencing cluster option])
AC_ARG_WITH([sbd-sync-default],
[AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([
default value used by sbd if SBD_SYNC_RESOURCE_STARTUP
environment variable is not set @<:@false@:>@]))],
)
AS_CASE([$with_sbd_sync_default],
[""], [with_sbd_sync_default=false],
[false], [],
[true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"],
[AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])]
)
AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT],
[$with_sbd_sync_default],
[Default value for SBD_SYNC_RESOURCE_STARTUP environment variable])
AC_ARG_WITH([resource-stickiness-default],
[AS_HELP_STRING([--with-resource-stickiness-default],
[If positive, value to add to new CIBs as explicit resource default for resource-stickiness @<:@0@:>@])],
)
errmsg="Invalid value \"$with_resource_stickiness_default\" for --with-resource-stickiness-default"
AS_CASE([$with_resource_stickiness_default],
[0|""], [with_resource_stickiness_default="0"],
[*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])],
[PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"]
)
AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT],
[$with_resource_stickiness_default],
[Default value for resource-stickiness resource meta-attribute])
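dnl The validation above accepts only nonnegative integers. For example, ""
dnl and "0" normalize to 0 (no feature flag), "100" adds the
dnl default-resource-stickiness feature, and "-1" or "5s" (anything matching
dnl the *[!0-9]* glob) aborts with the error message.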
AC_ARG_WITH([corosync],
[AS_HELP_STRING([--with-corosync],
[support the Corosync messaging and membership layer @<:@try@:>@])]
)
yes_no_try "$with_corosync" "try"
with_corosync=$?
dnl Get default from corosync if possible.
PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir],
[PCMK__COROSYNC_CONF="$PCMK__COROSYNC_CONF/corosync.conf"],
[PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"])
AC_ARG_WITH([corosync-conf],
[AS_HELP_STRING([--with-corosync-conf], m4_normalize([
location of Corosync configuration file
@<:@value from Corosync package if available otherwise
SYSCONFDIR/corosync/corosync.conf@:>@]))],
[ PCMK__COROSYNC_CONF="$withval" ]
)
AC_ARG_WITH([nagios],
[AS_HELP_STRING([--with-nagios], [support nagios resources (deprecated)])]
)
yes_no_try "$with_nagios" "try"
with_nagios=$?
dnl --with-* options: directory locations
AC_ARG_WITH([nagios-plugin-dir],
[AS_HELP_STRING([--with-nagios-plugin-dir=DIR],
[directory for nagios plugins (deprecated) @<:@LIBEXECDIR/nagios/plugins@:>@])],
[ NAGIOS_PLUGIN_DIR="$withval" ]
)
AC_ARG_WITH([nagios-metadata-dir],
[AS_HELP_STRING([--with-nagios-metadata-dir=DIR],
[directory for nagios plugins metadata (deprecated) @<:@DATADIR/nagios/plugins-metadata@:>@])],
[ NAGIOS_METADATA_DIR="$withval" ]
)
INITDIR=""
AC_ARG_WITH([initdir],
[AS_HELP_STRING([--with-initdir=DIR],
[directory for init (rc) scripts])],
[ INITDIR="$withval" ]
)
systemdsystemunitdir="${systemdsystemunitdir-}"
AC_ARG_WITH([systemdsystemunitdir],
[AS_HELP_STRING([--with-systemdsystemunitdir=DIR],
[directory for systemd unit files (advanced option: must match what systemd uses)])],
[ systemdsystemunitdir="$withval" ]
)
CONFIGDIR=""
AC_ARG_WITH([configdir],
[AS_HELP_STRING([--with-configdir=DIR],
[directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])],
[ CONFIGDIR="$withval" ]
)
dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users
dnl have an older version, they can use our --with-runstatedir.
pcmk_runstatedir=""
AC_ARG_WITH([runstatedir],
[AS_HELP_STRING([--with-runstatedir=DIR],
[modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])],
[ pcmk_runstatedir="$withval" ]
)
CRM_LOG_DIR=""
AC_ARG_WITH([logdir],
[AS_HELP_STRING([--with-logdir=DIR],
[directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])],
[ CRM_LOG_DIR="$withval" ]
)
CRM_BUNDLE_DIR=""
AC_ARG_WITH([bundledir],
[AS_HELP_STRING([--with-bundledir=DIR],
[directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])],
[ CRM_BUNDLE_DIR="$withval" ]
)
dnl Get default from resource-agents if possible. Otherwise, the default uses
dnl /usr/lib rather than libdir because it's determined by the OCF project and
dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or
dnl such, the OCF agents will be expected in their usual location. However, we
dnl do give the user the option to override it.
PKG_CHECK_VAR([OCF_ROOT_DIR], [resource-agents], [ocfrootdir], [],
[OCF_ROOT_DIR="/usr/lib/ocf"])
AC_ARG_WITH([ocfdir],
[AS_HELP_STRING([--with-ocfdir=DIR], m4_normalize([
OCF resource agent root directory (advanced option: changing this
may break other cluster components unless similarly configured)
@<:@value from resource-agents package if available otherwise
/usr/lib/ocf@:>@]))],
[ OCF_ROOT_DIR="$withval" ]
)
AC_SUBST(OCF_ROOT_DIR)
AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"],
[OCF root directory for resource agents and libraries])
PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [],
[OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"])
AC_ARG_WITH([ocfrapath],
[AS_HELP_STRING([--with-ocfrapath=DIR], m4_normalize([
OCF resource agent directories (colon-separated) to search
@<:@value from resource-agents package if available otherwise
OCFDIR/resource.d@:>@]))],
[ OCF_RA_PATH="$withval" ]
)
AC_SUBST(OCF_RA_PATH)
OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d"
AC_ARG_WITH([ocfrainstalldir],
[AS_HELP_STRING([--with-ocfrainstalldir=DIR], m4_normalize([
OCF installation directory for Pacemaker's resource agents
@<:@OCFDIR/resource.d@:>@]))],
[ OCF_RA_INSTALL_DIR="$withval" ]
)
AC_SUBST(OCF_RA_INSTALL_DIR)
dnl Get default from fence-agents if available
PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix],
[PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"],
[PCMK__FENCE_BINDIR="$sbindir"])
AC_ARG_WITH([fence-bindir],
[AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([
directory for executable fence agents @<:@value from fence-agents
package if available otherwise SBINDIR@:>@]))],
[ PCMK__FENCE_BINDIR="$withval" ]
)
AC_SUBST(PCMK__FENCE_BINDIR)
dnl --with-* options: non-production testing
AC_ARG_WITH([profiling],
[AS_HELP_STRING([--with-profiling],
[disable optimizations, for effective profiling @<:@no@:>@])]
)
yes_no_try "$with_profiling" "no"
with_profiling=$?
AC_ARG_WITH([coverage],
[AS_HELP_STRING([--with-coverage],
[disable optimizations, for effective profiling and coverage testing @<:@no@:>@])]
)
yes_no_try "$with_coverage" "no"
with_coverage=$?
AC_ARG_WITH([sanitizers],
[AS_HELP_STRING([--with-sanitizers=...,...],
[enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])],
[ SANITIZERS="$withval" ],
[ SANITIZERS="" ])
dnl Environment variable options
AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])
dnl ===============================================
dnl General Processing
dnl ===============================================
AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION",
[Version number of this Pacemaker build])
PACKAGE_SERIES=`echo $VERSION | awk -F. '{ print $1"."$2 }'`
AC_SUBST(PACKAGE_SERIES)
AC_PROG_LN_S
AC_PROG_MKDIR_P
# Check for fatal warning support
AS_IF([test $enable_fatal_warnings -ne $DISABLED && test x"$GCC" = x"yes" && cc_supports_flag -Werror],
[WERROR="-Werror"],
[
WERROR=""
AS_CASE([$enable_fatal_warnings],
[$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])],
[$OPTIONAL], [
AC_MSG_NOTICE([Compiler does not support fatal warnings])
enable_fatal_warnings=$DISABLED
])
])
AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}])
AS_CASE([$INITDIR],
[prefix], [INITDIR=$prefix],
[""], [
AC_MSG_CHECKING([which init (rc) directory to use])
for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
/usr/local/etc/rc.d /etc/rc.d
do
AS_IF([test -d $initdir],
[
INITDIR=$initdir
break
])
done
AC_MSG_RESULT([$INITDIR])
])
AC_SUBST(INITDIR)
dnl Expand values of autoconf-provided directory options
expand_path_option prefix
expand_path_option exec_prefix
expand_path_option bindir
expand_path_option sbindir
expand_path_option libexecdir
expand_path_option datadir
expand_path_option sysconfdir
expand_path_option sharedstatedir
expand_path_option localstatedir
expand_path_option libdir
expand_path_option includedir
expand_path_option oldincludedir
expand_path_option infodir
expand_path_option mandir
dnl Home-grown variables
expand_path_option localedir "${datadir}/locale"
AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs])
AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"])
expand_path_option runstatedir "${localstatedir}/run"
AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"],
[Location for modifiable per-process data])
AC_SUBST(runstatedir)
expand_path_option INITDIR
AC_DEFINE_UNQUOTED([PCMK__LSB_INIT_DIR], ["$INITDIR"],
[Location for LSB init scripts])
expand_path_option docdir "${datadir}/doc/${PACKAGE}-${VERSION}"
AC_SUBST(docdir)
expand_path_option CONFIGDIR "${sysconfdir}/sysconfig"
AC_SUBST(CONFIGDIR)
expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf"
AC_SUBST(PCMK__COROSYNC_CONF)
expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker"
AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
AC_SUBST(CRM_LOG_DIR)
expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles"
AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
AC_SUBST(CRM_BUNDLE_DIR)
expand_path_option PCMK__FENCE_BINDIR
AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR",
[Location for executable fence agents])
expand_path_option OCF_RA_PATH
AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"],
[OCF directories to search for resource agents])
AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [],
[AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])])
AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
[GnuTLS cipher priorities])
AC_SUBST(PCMK_GNUTLS_PRIORITIES)
AS_IF([test x"${BUG_URL}" = x""],
[BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"])
AC_SUBST(BUG_URL)
AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"],
[Where bugs should be reported])
for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
sharedstatedir localstatedir libdir includedir oldincludedir infodir \
mandir INITDIR docdir CONFIGDIR localedir
do
dirname=`eval echo '${'${j}'}'`
AS_IF([test ! -d "$dirname"],
[AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])])
done
us_auth=
AC_CHECK_HEADER([sys/socket.h], [
AC_CHECK_DECL([SO_PEERCRED], [
# Linux
AC_CHECK_TYPE([struct ucred], [
us_auth=peercred_ucred;
AC_DEFINE([HAVE_UCRED], [1],
[Define if Unix socket auth method is
getsockopt(s, SO_PEERCRED, &ucred, ...)])
], [
# OpenBSD
AC_CHECK_TYPE([struct sockpeercred], [
us_auth=localpeercred_sockpeercred;
AC_DEFINE([HAVE_SOCKPEERCRED], [1],
[Define if Unix socket auth method is
getsockopt(s, SO_PEERCRED, &sockpeercred, ...)])
], [], [[#include <sys/socket.h>]])
], [[#define _GNU_SOURCE
#include <sys/socket.h>]])
], [], [[#include <sys/socket.h>]])
])
AS_IF([test -z "${us_auth}"], [
# FreeBSD
AC_CHECK_DECL([getpeereid], [
us_auth=getpeereid;
AC_DEFINE([HAVE_GETPEEREID], [1],
[Define if Unix socket auth method is
getpeereid(s, &uid, &gid)])
], [
# Solaris/OpenIndiana
AC_CHECK_DECL([getpeerucred], [
us_auth=getpeerucred;
AC_DEFINE([HAVE_GETPEERUCRED], [1],
[Define if Unix socket auth method is
getpeerucred(s, &ucred)])
], [
AC_MSG_FAILURE([No way to authenticate a Unix socket peer])
], [[#include <ucred.h>]])
])
])
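dnl For reference, a minimal sketch (assuming the Linux SO_PEERCRED method
dnl detected above) of how a daemon authenticates a Unix socket peer:
dnl   struct ucred cred;
dnl   socklen_t len = sizeof(cred);
dnl   if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &cred, &len) == 0) {
dnl       /* cred.uid and cred.gid identify the connecting client */
dnl   }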
dnl OS-based decision-making is poor autotools practice; feature-based
dnl mechanisms are strongly preferred. Keep this section to a bare minimum;
dnl regard as a "necessary evil".
INIT_EXT=""
PROCFS=0
dnl Solaris and some *BSD versions support procfs but not files we need
AS_CASE(["$host_os"],
[*bsd*], [INIT_EXT=".sh"],
[*linux*], [PROCFS=1],
[darwin*], [
LIBS="$LIBS -L${prefix}/lib"
CFLAGS="$CFLAGS -I${prefix}/include"
])
AC_SUBST(INIT_EXT)
AM_CONDITIONAL([SUPPORT_PROCFS], [test $PROCFS -eq 1])
AC_DEFINE_UNQUOTED([HAVE_LINUX_PROCFS], [$PROCFS],
[Define to 1 if procfs is supported])
AS_CASE(["$host_cpu"],
[ppc64|powerpc64], [
AS_CASE([$CFLAGS],
[*powerpc64*], [],
[*], [AS_IF([test x"$GCC" = x"yes"], [CFLAGS="$CFLAGS -m64"])
])
])
dnl ===============================================
dnl Program Paths
dnl ===============================================
PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
export PATH
dnl Pacemaker's executable python scripts will invoke the python specified by
dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a
dnl built-in list with (unversioned) "python" having precedence. To configure
dnl Pacemaker to use a specific python interpreter version, define PYTHON
dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6
dnl Ensure PYTHON is an absolute path
AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])])
dnl Require a minimum Python version
AM_PATH_PYTHON([3.4])
AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor])
AC_PATH_PROG([HELP2MAN], [help2man])
AC_PATH_PROG([SPHINX], [sphinx-build])
AC_PATH_PROG([INKSCAPE], [inkscape])
AC_PATH_PROG([XSLTPROC], [xsltproc])
AC_PATH_PROG([XMLCATALOG], [xmlcatalog])
dnl Bash is needed for building man pages and running regression tests.
dnl BASH is already an environment variable, so use something else.
AC_PATH_PROG([BASH_PATH], [bash])
AS_IF([test x"${BASH_PATH}" != x""], [],
[AC_MSG_FAILURE([Could not find required build tool bash])])
AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
AS_IF([test x"${HELP2MAN}" != x""],
[PCMK_FEATURES="$PCMK_FEATURES generated-manpages"])
MANPAGE_XSLT=""
AS_IF([test x"${XSLTPROC}" != x""],
[
AC_MSG_CHECKING([for DocBook-to-manpage transform])
# first try to figure out correct template using xmlcatalog query,
# resort to extensive (semi-deterministic) file search if that fails
DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current'
DOCBOOK_XSL_PATH='manpages/docbook.xsl'
MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \
| sed -n 's|^file://||p;q')
AS_IF([test x"${MANPAGE_XSLT}" = x""],
[
DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
-type d 2>/dev/null | LC_ALL=C sort)
XSLT=$(basename ${DOCBOOK_XSL_PATH})
for d in ${DIRS}
do
AS_IF([test -f "${d}/${XSLT}"],
[
MANPAGE_XSLT="${d}/${XSLT}"
break
])
done
])
])
AC_MSG_RESULT([$MANPAGE_XSLT])
AC_SUBST(MANPAGE_XSLT)
AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"")
AS_IF([test x"${MANPAGE_XSLT}" != x""],
[PCMK_FEATURES="$PCMK_FEATURES agent-manpages"])
AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$'])
AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x])
AS_IF([test x"${ASCIIDOC_CONV}" != x""],
[PCMK_FEATURES="$PCMK_FEATURES ascii-docs"])
AM_CONDITIONAL([BUILD_SPHINX_DOCS],
[test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""])
AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"])
dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt
AC_MSG_CHECKING([for GNU-compatible getopt])
IFS_orig=$IFS
IFS=:
for PATH_DIR in $PATH
do
IFS=$IFS_orig
GETOPT_PATH="${PATH_DIR}/getopt"
AS_IF([test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH"],
[
$GETOPT_PATH -T >/dev/null 2>/dev/null
AS_IF([test $? -eq 4], [break])
])
GETOPT_PATH=""
done
IFS=$IFS_orig
AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])],
[
AC_MSG_RESULT([no])
AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt])
])
AC_SUBST([GETOPT_PATH])
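dnl Background for the check above: util-linux (GNU-compatible) getopt exits
dnl with status 4 for "getopt -T", while traditional implementations print
dnl "--" and exit 0, so the exit status reliably identifies an enhanced getopt.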
dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl is a dummy function which no system supplies. It is here to make
dnl the system compile semi-correctly on OpenBSD which doesn't know
dnl how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl System-V systems may have it, but hidden and/or deprecated.
dnl A replacement function is supplied for it.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl We wrote a tolerably-fast replacement function for it.
AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup)
dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket) dnl -lsocket
AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available)
PKG_CHECK_MODULES([UUID], [uuid],
[CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}"
LIBS="${LIBS} ${UUID_LIBS}"])
AC_CHECK_FUNCS([sched_setscheduler])
AS_IF([test x"$ac_cv_func_sched_setscheduler" != x"yes"],
[PC_LIBS_RT=""],
[PC_LIBS_RT="-lrt"])
AC_SUBST(PC_LIBS_RT)
# Require minimum glib version
PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0],
[CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}"
LIBS="${LIBS} ${GLIB_LIBS}"])
# Check whether high-resolution sleep function is available
AC_CHECK_FUNCS([nanosleep usleep])
#
# Where is dlopen?
#
AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"],
[LIBADD_DL=""],
[test x"$ac_cv_lib_dl_dlopen" = x"yes"],
[LIBADD_DL=-ldl],
[LIBADD_DL=${lt_cv_dlopen_libs}])
PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
[CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
LIBS="${LIBS} ${LIBXML2_LIBS}"])
REQUIRE_LIB([xslt], [xsltApplyStylesheet])
dnl ========================================================================
dnl Headers
dnl ========================================================================
# Some distributions insert #warnings into deprecated headers. If we will
# enable fatal warnings for the build, then enable them for the header checks
# as well, otherwise the build could fail even though the header check
# succeeds. (We should probably be doing this in more places.)
cc_temp_flags "$CFLAGS $WERROR"
# Optional headers (inclusion of these should be conditional in C code)
AC_CHECK_HEADERS([linux/swab.h])
AC_CHECK_HEADERS([stddef.h])
AC_CHECK_HEADERS([sys/signalfd.h])
AC_CHECK_HEADERS([uuid/uuid.h])
AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h])
# Required headers
REQUIRE_HEADER([arpa/inet.h])
REQUIRE_HEADER([ctype.h])
REQUIRE_HEADER([dirent.h])
REQUIRE_HEADER([errno.h])
REQUIRE_HEADER([glib.h])
REQUIRE_HEADER([grp.h])
REQUIRE_HEADER([limits.h])
REQUIRE_HEADER([netdb.h])
REQUIRE_HEADER([netinet/in.h])
REQUIRE_HEADER([netinet/ip.h], [
#include <sys/types.h>
#include <netinet/in.h>
])
REQUIRE_HEADER([pwd.h])
REQUIRE_HEADER([signal.h])
REQUIRE_HEADER([stdio.h])
REQUIRE_HEADER([stdlib.h])
REQUIRE_HEADER([string.h])
REQUIRE_HEADER([strings.h])
REQUIRE_HEADER([sys/ioctl.h])
REQUIRE_HEADER([sys/param.h])
REQUIRE_HEADER([sys/reboot.h])
REQUIRE_HEADER([sys/resource.h])
REQUIRE_HEADER([sys/socket.h])
REQUIRE_HEADER([sys/stat.h])
REQUIRE_HEADER([sys/time.h])
REQUIRE_HEADER([sys/types.h])
REQUIRE_HEADER([sys/utsname.h])
REQUIRE_HEADER([sys/wait.h])
REQUIRE_HEADER([time.h])
REQUIRE_HEADER([unistd.h])
REQUIRE_HEADER([libxml/xpath.h])
REQUIRE_HEADER([libxslt/xslt.h])
cc_restore_flags
AC_CHECK_FUNCS([uuid_unparse], [],
[AC_MSG_FAILURE([Could not find required C function uuid_unparse()])])
AC_CACHE_CHECK([whether __progname and __progname_full are available],
[pf_cv_var_progname],
[AC_LINK_IFELSE(
[AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
[[__progname = "foo"; __progname_full = "foo bar";]])],
[pf_cv_var_progname="yes"],
[pf_cv_var_progname="no"]
)]
)
AS_IF([test x"$pf_cv_var_progname" = x"yes"],
[AC_DEFINE(HAVE_PROGNAME,1,[Define to 1 if processes can change their name])])
dnl ========================================================================
dnl Generic declarations
dnl ========================================================================
AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [], [[
#include <time.h>
]])
dnl ========================================================================
dnl Unit test declarations
dnl ========================================================================
AC_CHECK_DECLS([assert_float_equal], [], [], [[
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
]])
cc_temp_flags "$CFLAGS -Wl,--wrap=uname"
WRAPPABLE_UNAME="no"
AC_MSG_CHECKING([if uname() can be wrapped])
AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <sys/utsname.h>
int __wrap_uname(struct utsname *buf) {
return 100;
}
int main(int argc, char **argv) {
struct utsname x;
return uname(&x) == 100 ? 0 : 1;
}
]])],
[ WRAPPABLE_UNAME="yes" ], [ WRAPPABLE_UNAME="no"])
AC_MSG_RESULT([$WRAPPABLE_UNAME])
AM_CONDITIONAL([WRAPPABLE_UNAME], [test x"$WRAPPABLE_UNAME" = x"yes"])
cc_restore_flags
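dnl Background: with GNU ld, -Wl,--wrap=uname resolves calls to uname() to
dnl __wrap_uname() (the original stays reachable as __real_uname()). The probe
dnl above links a program whose __wrap_uname() returns a sentinel (100) and
dnl checks that uname() really returns it, confirming the unit tests can mock
dnl uname() this way.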
dnl ========================================================================
dnl Structures
dnl ========================================================================
AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
AC_CHECK_MEMBER([struct dirent.d_type],
AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
[#include <dirent.h>])
dnl ========================================================================
dnl Functions
dnl ========================================================================
REQUIRE_FUNC([getopt])
REQUIRE_FUNC([setenv])
REQUIRE_FUNC([unsetenv])
REQUIRE_FUNC([vasprintf])
AC_CACHE_CHECK(whether sscanf supports %m,
pf_cv_var_sscanf,
AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <stdio.h>
const char *s = "some-command-line-arg";
int main(int argc, char **argv) {
char *name = NULL;
int n = sscanf(s, "%ms", &name);
return n == 1 ? 0 : 1;
}
]])],
pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no"))
AS_IF([test x"$pf_cv_var_sscanf" = x"yes"],
[AC_DEFINE([HAVE_SSCANF_M], [1],
[Define to 1 if sscanf %m modifier is available])])
dnl ========================================================================
dnl bzip2
dnl ========================================================================
REQUIRE_HEADER([bzlib.h])
REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress])
dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================
AC_MSG_CHECKING([for sighandler_t])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]], [[sighandler_t *f;]])],
[
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_SIGHANDLER_T], [1],
[Define to 1 if sighandler_t is available])
],
[AC_MSG_RESULT([no])])
dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h])
dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
PC_NAME_CURSES=""
PC_LIBS_CURSES=""
AS_IF([test x"$ac_cv_header_ncurses_h" = x"yes"], [
AC_CHECK_LIB(ncurses, printw,
[AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
PC_NAME_CURSES="ncurses"
])
AS_IF([test x"$ac_cv_header_ncurses_ncurses_h" = x"yes"], [
AC_CHECK_LIB(ncurses, printw,
[AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
PC_NAME_CURSES="ncurses"
])
dnl Only look for non-n-library if there was no n-library.
AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_h" = x"yes"], [
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
PC_LIBS_CURSES="$CURSESLIBS"
])
dnl Only look for non-n-library if there was no n-library.
AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_curses_h" = x"yes"], [
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
PC_LIBS_CURSES="$CURSESLIBS"
])
AS_IF([test x"$CURSESLIBS" != x""],
[PCMK_FEATURES="$PCMK_FEATURES ncurses"])
dnl Check for printw() prototype compatibility
AS_IF([test x"$CURSESLIBS" != x"" && cc_supports_flag -Wcast-qual], [
ac_save_LIBS=$LIBS
LIBS="$CURSESLIBS"
# avoid broken test because of hardened build environment in Fedora 23+
# - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
# - https://bugzilla.redhat.com/1297985
AS_IF([cc_supports_flag -fPIC],
[cc_temp_flags "-Wcast-qual $WERROR -fPIC"],
[cc_temp_flags "-Wcast-qual $WERROR"])
AC_MSG_CHECKING([whether curses library is compatible])
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([
#if defined(HAVE_NCURSES_H)
# include <ncurses.h>
#elif defined(HAVE_NCURSES_NCURSES_H)
# include <ncurses/ncurses.h>
#elif defined(HAVE_CURSES_H)
# include <curses.h>
#endif
],
[printw((const char *)"Test");]
)],
[AC_MSG_RESULT([yes])],
[
AC_MSG_RESULT([no])
AC_MSG_WARN(m4_normalize([Disabling curses because the printw()
function of your (n)curses library is old.
If you wish to enable curses, update to a
newer version (ncurses 5.4 or later is
recommended, available from
https://invisible-island.net/ncurses/)
]))
AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1],
[Define to 1 if curses library has incompatible printw()])
]
)
LIBS=$ac_save_LIBS
cc_restore_flags
])
AC_SUBST(CURSESLIBS)
AC_SUBST(PC_NAME_CURSES)
AC_SUBST(PC_LIBS_CURSES)
dnl ========================================================================
dnl Profiling and GProf
dnl ========================================================================
CFLAGS_ORIG="$CFLAGS"
AS_IF([test $with_coverage -ne $DISABLED],
[
with_profiling=$REQUIRED
PCMK_FEATURES="$PCMK_FEATURES coverage"
CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
dnl During linking, make sure to specify -lgcov or -coverage
]
)
AS_IF([test $with_profiling -ne $DISABLED],
[
with_profiling=$REQUIRED
PCMK_FEATURES="$PCMK_FEATURES profile"
dnl Disable various compiler optimizations
CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin"
dnl CFLAGS="$CFLAGS -fno-inline-functions"
dnl CFLAGS="$CFLAGS -fno-default-inline"
dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once"
dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls"
dnl Turn off optimization so tools can get accurate line numbers
CFLAGS=`echo $CFLAGS | sed \
-e 's/-O.\ //g' \
-e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \
-e 's/-D_FORTIFY_SOURCE=.\ //g'`
CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2"
AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG])
AC_MSG_NOTICE([CFLAGS after: $CFLAGS])
]
)
AC_DEFINE_UNQUOTED([SUPPORT_PROFILING], [$with_profiling], [Support profiling])
AM_CONDITIONAL([BUILD_PROFILING], [test "$with_profiling" = "$REQUIRED"])
dnl ========================================================================
dnl Cluster infrastructure - LibQB
dnl ========================================================================
PKG_CHECK_MODULES(libqb, libqb >= 0.17)
CPPFLAGS="$libqb_CFLAGS $CPPFLAGS"
LIBS="$libqb_LIBS $LIBS"
dnl libqb 2.0.5+ (2022-03)
AC_CHECK_FUNCS([qb_ipcc_connect_async])
dnl libqb 2.0.2+ (2020-10)
AC_CHECK_FUNCS([qb_ipcc_auth_get])
dnl libqb 2.0.0+ (2020-05)
CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN])
CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS])
dnl Support Linux-HA fence agents if available
AS_IF([test x"$cross_compiling" != x"yes"],
[CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"])
AC_CHECK_HEADERS([stonith/stonith.h],
[
AC_CHECK_LIB([pils], [PILLoadPlugin])
AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel])
PCMK_FEATURES="$PCMK_FEATURES lha"
])
AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"])
dnl ===============================================
dnl Variables needed for substitution
dnl ===============================================
CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_SCHEMA_DIRECTORY)
CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"],
[Directory Pacemaker daemons should change to (without systemd, core files will go here)])
AC_SUBST(CRM_CORE_DIR)
AS_IF([test x"${CRM_DAEMON_USER}" = x""],
[CRM_DAEMON_USER="hacluster"])
AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_USER)
AS_IF([test x"${CRM_DAEMON_GROUP}" = x""],
[CRM_DAEMON_GROUP="haclient"])
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_GROUP)
CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons)
AC_SUBST(CRM_PACEMAKER_DIR)
CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
AC_SUBST(CRM_BLACKBOX_DIR)
PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs)
AC_SUBST(PE_STATE_DIR)
CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
AC_SUBST(CRM_CONFIG_DIR)
CRM_DAEMON_DIR="${libexecdir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)
CRM_STATE_DIR="${runstatedir}/crm"
AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
[Where to keep state files and sockets])
AC_SUBST(CRM_STATE_DIR)
CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
AC_SUBST(CRM_RSCTMP_DIR)
PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey)
AC_SUBST(PACEMAKER_CONFIG_DIR)
AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries])
AC_PATH_PROGS(GIT, git false)
AC_MSG_CHECKING([build version])
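dnl $Format:%h$ below is replaced with the abbreviated commit hash by
dnl "git archive" when this file carries the export-subst attribute; in a
dnl plain checkout, the unset shell variable $Format expands to nothing,
dnl leaving ":%h$" for the comparison below.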
BUILD_VERSION=$Format:%h$
AS_IF([test $BUILD_VERSION != ":%h$"],
[AC_MSG_RESULT([$BUILD_VERSION (archive hash)])],
[test -x $GIT && test -d .git],
[
BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
AC_MSG_RESULT([$BUILD_VERSION (git hash)])
],
[
# The current directory name makes a reasonable default
# Most generated archives will include the hash or tag
BASE=`basename $PWD`
BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
AC_MSG_RESULT([$BUILD_VERSION (directory name)])
])
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)
HAVE_dbus=1
PKG_CHECK_MODULES([DBUS], [dbus-1],
[CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
[HAVE_dbus=0])
AC_DEFINE_UNQUOTED(HAVE_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
dnl libdbus 1.5.12+ (2012-03) / 1.6.0+ (2012-06)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
AS_IF([test $HAVE_dbus = 0],
[PC_NAME_DBUS=""],
[PC_NAME_DBUS="dbus-1"])
AC_SUBST(PC_NAME_DBUS)
AS_CASE([$enable_systemd],
[$REQUIRED], [
AS_IF([test $HAVE_dbus = 0],
[AC_MSG_FAILURE([Cannot support systemd resources without DBus])])
AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
[AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])])
AS_IF([check_systemdsystemunitdir], [],
[AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])])
],
[$OPTIONAL], [
AS_IF([test $HAVE_dbus = 0 \
|| test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
[enable_systemd=$DISABLED],
[
AC_MSG_CHECKING([for systemd version (using dbus-send)])
ret=$({ dbus-send --system --print-reply \
--dest=org.freedesktop.systemd1 \
/org/freedesktop/systemd1 \
org.freedesktop.DBus.Properties.Get \
string:org.freedesktop.systemd1.Manager \
string:Version 2>/dev/null \
|| echo "version unavailable"; } | tail -n1)
# sanitize output a bit (interested just in value, not type),
# ret is intentionally unenquoted so as to normalize whitespace
ret=$(echo ${ret} | cut -d' ' -f2-)
AC_MSG_RESULT([${ret}])
AS_IF([test x"$ret" != x"unavailable" \
|| systemctl --version 2>/dev/null | grep -q systemd],
[
AS_IF([check_systemdsystemunitdir],
[enable_systemd=$REQUIRED],
[enable_systemd=$DISABLED])
],
[enable_systemd=$DISABLED]
)
])
],
)
AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])],
[
AC_MSG_RESULT([yes])
PCMK_FEATURES="$PCMK_FEATURES systemd"
]
)
AC_SUBST([systemdsystemunitdir])
AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd],
[Support systemd resources])
AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED])
AC_SUBST(SUPPORT_SYSTEMD)
AS_CASE([$enable_upstart],
[$REQUIRED], [
AS_IF([test $HAVE_dbus = 0],
[AC_MSG_FAILURE([Cannot support Upstart resources without DBus])])
],
[$OPTIONAL], [
AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED],
[
AC_MSG_CHECKING([for Upstart version (using dbus-send)])
ret=$({ dbus-send --system --print-reply \
--dest=com.ubuntu.Upstart \
/com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
|| echo "version unavailable"; } | tail -n1)
# sanitize output a bit (interested just in value, not type),
# ret is intentionally unenquoted so as to normalize whitespace
ret=$(echo ${ret} | cut -d' ' -f2-)
AC_MSG_RESULT([${ret}])
AS_IF([test x"$ret" != x"unavailable" \
|| initctl --version 2>/dev/null | grep -q upstart],
[enable_upstart=$REQUIRED],
[enable_upstart=$DISABLED]
)
])
],
)
AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])],
[
AC_MSG_RESULT([yes])
PCMK_FEATURES="$PCMK_FEATURES upstart"
]
)
AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart],
[Support Upstart resources])
AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED])
AC_SUBST(SUPPORT_UPSTART)
AS_CASE([$with_nagios],
[$REQUIRED], [
AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
[AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])])
],
[$OPTIONAL], [
AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
[with_nagios=$DISABLED], [with_nagios=$REQUIRED])
]
)
AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"])
AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins])
AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED])
AS_IF([test x"$NAGIOS_PLUGIN_DIR" = x""],
[NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"])
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
AC_SUBST(NAGIOS_PLUGIN_DIR)
AS_IF([test x"$NAGIOS_METADATA_DIR" = x""],
[NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"])
AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
AC_SUBST(NAGIOS_METADATA_DIR)
STACKS=""
CLUSTERLIBS=""
PC_NAME_CLUSTER=""
dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================
COROSYNC_LIBS=""
AS_CASE([$with_corosync],
[$REQUIRED], [
# These will be fatal if unavailable
PKG_CHECK_MODULES([cpg], [libcpg])
PKG_CHECK_MODULES([cfg], [libcfg])
PKG_CHECK_MODULES([cmap], [libcmap])
PKG_CHECK_MODULES([quorum], [libquorum])
PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common])
],
[$OPTIONAL], [
PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED])
AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED])
]
)
AS_IF([test $with_corosync -ne $DISABLED],
[
AC_MSG_CHECKING([for Corosync 2 or later])
AC_MSG_RESULT([yes])
CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS"
CPPFLAGS="$CPPFLAGS `$PKG_CONFIG --cflags-only-I corosync`"
COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS"
CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
PC_NAME_CLUSTER="$PC_NAME_CLUSTER libcfg libcmap libcorosync_common libcpg libquorum"
STACKS="$STACKS corosync-ge-2"
dnl Shutdown tracking added (back) to corosync Jan 2021
saved_LIBS="$LIBS"
LIBS="$LIBS $COROSYNC_LIBS"
AC_CHECK_FUNCS([corosync_cfg_trackstart])
LIBS="$saved_LIBS"
]
)
AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync],
[Support the Corosync messaging and membership layer])
AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED])
AC_SUBST([SUPPORT_COROSYNC])
dnl
dnl Cluster stack - Sanity
dnl
AS_IF([test x"$STACKS" != x""], [AC_MSG_NOTICE([Supported stacks:${STACKS}])],
[AC_MSG_FAILURE([At least one cluster stack must be supported])])
PCMK_FEATURES="${PCMK_FEATURES}${STACKS}"
AC_SUBST(CLUSTERLIBS)
AC_SUBST(PC_NAME_CLUSTER)
dnl ========================================================================
dnl CIB secrets
dnl ========================================================================
AS_IF([test $with_cibsecrets -ne $DISABLED],
[
with_cibsecrets=$REQUIRED
PCMK_FEATURES="$PCMK_FEATURES cibsecrets"
LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
AC_DEFINE_UNQUOTED([LRM_CIBSECRETS_DIR], ["$LRM_CIBSECRETS_DIR"],
[Location for CIB secrets])
AC_SUBST([LRM_CIBSECRETS_DIR])
]
)
AC_DEFINE_UNQUOTED([SUPPORT_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets])
AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED])
dnl ========================================================================
dnl GnuTLS
dnl ========================================================================
dnl Require GnuTLS >=2.12.0 (2011-03) for Pacemaker Remote support
PC_NAME_GNUTLS=""
AS_CASE([$with_gnutls],
[$REQUIRED], [
REQUIRE_LIB([gnutls], [gnutls_sec_param_to_pk_bits])
REQUIRE_HEADER([gnutls/gnutls.h])
],
[$OPTIONAL], [
AC_CHECK_LIB([gnutls], [gnutls_sec_param_to_pk_bits],
[], [with_gnutls=$DISABLED])
AC_CHECK_HEADERS([gnutls/gnutls.h], [], [with_gnutls=$DISABLED])
]
)
AS_IF([test $with_gnutls -ne $DISABLED],
[
PC_NAME_GNUTLS="gnutls"
PCMK_FEATURES="$PCMK_FEATURES remote"
]
)
AC_SUBST([PC_NAME_GNUTLS])
AM_CONDITIONAL([BUILD_REMOTE], [test $with_gnutls -ne $DISABLED])
# --- ASAN/UBSAN/TSAN (see man gcc) ---
# When using sanitizers, the -fsanitize=... option must be passed in both
# CFLAGS and LDFLAGS, and must come first in each list; otherwise there
# will be runtime issues (for example, the user would have to LD_PRELOAD
# the ASan runtime for it to work properly).
AS_IF([test -n "${SANITIZERS}"], [
SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g')
for SANITIZER in $SANITIZERS
do
AS_CASE([$SANITIZER],
[asan|ASAN], [
SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address"
SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan"
PCMK_FEATURES="$PCMK_FEATURES asan"
REQUIRE_LIB([asan],[main])
],
[ubsan|UBSAN], [
SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined"
SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan"
PCMK_FEATURES="$PCMK_FEATURES ubsan"
REQUIRE_LIB([ubsan],[main])
],
[tsan|TSAN], [
SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread"
SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan"
PCMK_FEATURES="$PCMK_FEATURES tsan"
REQUIRE_LIB([tsan],[main])
])
done
])
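dnl Usage example: ./configure --with-sanitizers=asan,ubsan
dnl (comma-separated; each value may be given as asan/ASAN, ubsan/UBSAN, or
dnl tsan/TSAN)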
dnl ========================================================================
dnl Compiler flags
dnl ========================================================================
dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to sub processes.
AS_IF([export | fgrep " CFLAGS=" > /dev/null],
[
SAVED_CFLAGS="$CFLAGS"
unset CFLAGS
CFLAGS="$SAVED_CFLAGS"
unset SAVED_CFLAGS
])
CC_EXTRAS=""
AS_IF([test x"$GCC" != x"yes"], [CFLAGS="$CFLAGS -g"], [
CFLAGS="$CFLAGS -ggdb"
dnl When we don't have diagnostic push / pull, we can't explicitly disable
dnl checking for nonliteral formats in the places where they occur on
dnl purpose, so we disable nonliteral format checking globally, since we
dnl abort on warnings.
dnl What makes this ugly is that nonliteral format checking is available
dnl as a separate switch in modern gcc, whereas in older gcc it is part of
dnl -Wformat=2. So: with push/pull we can enable -Wformat=2
dnl -Wformat-nonliteral; with -Wformat-nonliteral but no push/pull we can
dnl enable only -Wformat=2; otherwise neither.
gcc_diagnostic_push_pull=no
cc_temp_flags "$CFLAGS $WERROR"
AC_MSG_CHECKING([for gcc diagnostic push / pull])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#pragma GCC diagnostic push
#pragma GCC diagnostic pop
]])],
[
AC_MSG_RESULT([yes])
gcc_diagnostic_push_pull=yes
], AC_MSG_RESULT([no]))
cc_restore_flags
AS_IF([cc_supports_flag "-Wformat-nonliteral"],
[gcc_format_nonliteral=yes],
[gcc_format_nonliteral=no])
# We had to eliminate -Wnested-externs because of libtool changes
# Make sure to order options so that the former stand for prerequisites
# of the latter (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline"
EXTRA_FLAGS="$EXTRA_FLAGS -Wall"
EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return"
EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast"
EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align"
EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement"
EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels"
EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal"
EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security"
EXTRA_FLAGS="$EXTRA_FLAGS -Wimplicit-fallthrough"
EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes"
EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations"
EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs"
EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long"
EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing"
EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith"
EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes"
EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings"
EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable"
EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char"
AS_IF([test x"$gcc_diagnostic_push_pull" = x"yes"],
[
AC_DEFINE([HAVE_FORMAT_NONLITERAL], [],
[gcc can complain about nonliterals in format])
EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral"
],
[test x"$gcc_format_nonliteral" = x"yes"],
[EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"])
# Additional warnings it might be nice to enable one day
# -Wshadow
# -Wunreachable-code
for j in $EXTRA_FLAGS
do
AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"])
done
AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}])
])
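dnl For reference, cc_supports_flag-style probes (defined elsewhere in the
dnl build system) boil down to compiling a trivial program with the
dnl candidate flag and checking the result, roughly:
dnl   echo 'int main(void) { return 0; }' > conftest.c
dnl   $CC $WERROR $flag -c conftest.c -o conftest.o && flag_ok=yes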
dnl
dnl Hardening flags
dnl
dnl The primary control over whether (and which) targeted hardening build
dnl flags are applied is the --{enable,disable}-hardening option passed
dnl to ./configure:
dnl
dnl --enable-hardening=try (default):
dnl if any of the CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables
dnl (see below) is set and non-null, all of these custom flags (even the
dnl unset ones) are used as-is; otherwise, a best effort is made to offer
dnl reasonably strong hardening in several categories (RELRO, PIE,
dnl "bind now", stack protector) according to what the selected toolchain
dnl can offer
dnl
dnl --enable-hardening:
dnl same effect as --enable-hardening=try, except that the environment
dnl variables in question are ignored (unset)
dnl
dnl --disable-hardening:
dnl do not apply any targeted hardening measures at all
dnl
dnl The user-supplied environment variables that regulate hardening in
dnl the default case are as follows:
dnl
dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
dnl compiler and linker flags (respectively) for daemon programs
dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd,
dnl pacemaker-based, pacemaker-fenced, pacemaker-remoted,
dnl pacemaker-schedulerd)
dnl
dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
dnl compiler and linker flags (respectively) for libraries linked
dnl with the daemon programs
dnl
dnl Note that these are deliberately targeted variables (addressing
dnl particular targets throughout the various Makefiles) and have no effect
dnl outside their intended scope (e.g., they do not affect CLI utilities).
dnl For a global reach, use CFLAGS, LDFLAGS, etc. as usual.
dnl
dnl For guidance on the suitable flags consult, for instance:
dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
dnl
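dnl For illustration, a hypothetical invocation supplying custom hardening
dnl flags (which, with the default --enable-hardening=try, suppresses the
dnl automatic probing below) might be:
dnl   ./configure CFLAGS_HARDENED_EXE="-fPIE" LDFLAGS_HARDENED_EXE="-pie"
dnl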
AS_IF([test $enable_hardening -eq $OPTIONAL],
[
AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0],
[enable_hardening=$REQUIRED],
[AC_MSG_NOTICE([Hardening: using custom flags from environment])]
)
],
[
unset CFLAGS_HARDENED_EXE
unset CFLAGS_HARDENED_LIB
unset LDFLAGS_HARDENED_EXE
unset LDFLAGS_HARDENED_LIB
]
)
AS_CASE([$enable_hardening],
[$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])],
[$REQUIRED], [
CFLAGS_HARDENED_EXE=
CFLAGS_HARDENED_LIB=
LDFLAGS_HARDENED_EXE=
LDFLAGS_HARDENED_LIB=
relro=0
pie=0
bindnow=0
stackprot="none"
# daemons incl. libs: partial RELRO
flag="-Wl,-z,relro"
CC_CHECK_LDFLAGS(["${flag}"],
[
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
relro=1
])
# daemons: PIE for both CFLAGS and LDFLAGS
AS_IF([cc_supports_flag -fPIE],
[
flag="-pie"
CC_CHECK_LDFLAGS(["${flag}"],
[
CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
pie=1
])
]
)
# daemons incl. libs: full RELRO if sensible, plus as-needed linking
#                     to mitigate the startup performance hit caused by
#                     excessive linking against unneeded libraries
AS_IF([test "${relro}" = 1 && test "${pie}" = 1],
[
flag="-Wl,-z,now"
CC_CHECK_LDFLAGS(["${flag}"],
[
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
bindnow=1
])
]
)
AS_IF([test "${bindnow}" = 1],
[
flag="-Wl,--as-needed"
CC_CHECK_LDFLAGS(["${flag}"],
[
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
])
])
# universal: prefer strong > all > default stack protector if possible
flag=
AS_IF([cc_supports_flag -fstack-protector-strong],
[
flag="-fstack-protector-strong"
stackprot="strong"
],
[cc_supports_flag -fstack-protector-all],
[
flag="-fstack-protector-all"
stackprot="all"
],
[cc_supports_flag -fstack-protector],
[
flag="-fstack-protector"
stackprot="default"
]
)
AS_IF([test -n "${flag}"], [CC_EXTRAS="${CC_EXTRAS} ${flag}"])
# universal: enable stack clash protection if possible
AS_IF([cc_supports_flag -fstack-clash-protection],
[
CC_EXTRAS="${CC_EXTRAS} -fstack-clash-protection"
AS_IF([test "${stackprot}" = "none"],
[stackprot="clash-only"],
[stackprot="${stackprot}+clash"]
)
]
)
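# For example, if both -fstack-protector-strong and
# -fstack-clash-protection are accepted by the compiler, stackprot
# ends up as "strong+clash" in the summary logged below.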
# Log a summary
AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test x"${stackprot}" != x"none"],
[AC_MSG_NOTICE(m4_normalize([Hardening:
relro=${relro}
pie=${pie}
bindnow=${bindnow}
stackprot=${stackprot}]))
],
[AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])]
)
],
)
CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS"
LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS"
CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE"
LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE"
NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)
dnl
dnl We reset CFLAGS to include our warnings *after* all function checking
dnl is done, so that our warning flags don't keep the AC_*FUNCS() calls
dnl above from working. In particular, -Werror will *always* cause trouble
dnl if set before this point.
dnl
AS_IF([test $enable_fatal_warnings -ne $DISABLED], [
AC_MSG_NOTICE([Enabling fatal compiler warnings])
CFLAGS="$CFLAGS $WERROR"
])
AC_SUBST(CFLAGS)
dnl This is useful for use in Makefiles that need to remove one specific flag
CFLAGS_COPY="$CFLAGS"
AC_SUBST(CFLAGS_COPY)
AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries
AC_SUBST(LOCALE)
dnl Options for cleaning up the compiler output
AS_IF([test $enable_quiet -ne $DISABLED],
[
AC_MSG_NOTICE([Suppressing make details])
QUIET_LIBTOOL_OPTS="--silent"
QUIET_MAKE_OPTS="-s" # POSIX compliant
],
[
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
]
)
dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}"
# Make features list available (sorted alphabetically, without leading space)
PCMK_FEATURES=`echo "$PCMK_FEATURES" | sed -e 's/^ //' -e 's/ /\n/g' | sort | xargs`
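# e.g., PCMK_FEATURES=" remote ubsan asan" becomes "asan remote ubsan"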
AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
AC_SUBST(PCMK_FEATURES)
AC_SUBST(CC)
AC_SUBST(MAKEFLAGS)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_LIBTOOL_OPTS)
dnl Files we output that need to be executable
CONFIG_FILES_EXEC([agents/ocf/ClusterMon],
[agents/ocf/Dummy],
[agents/ocf/HealthCPU],
[agents/ocf/HealthIOWait],
[agents/ocf/HealthSMART],
[agents/ocf/Stateful],
[agents/ocf/SysInfo],
[agents/ocf/attribute],
[agents/ocf/controld],
[agents/ocf/ifspeed],
[agents/ocf/o2cb],
[agents/ocf/ping],
[agents/ocf/remote],
[agents/stonith/fence_legacy],
[agents/stonith/fence_watchdog],
[cts/cts-attrd],
[cts/cts-cli],
[cts/cts-exec],
[cts/cts-fencing],
[cts/cts-regression],
[cts/cts-scheduler],
[cts/benchmark/clubench],
[cts/lab/CTSlab.py],
[cts/lab/OCFIPraTest.py],
[cts/lab/cluster_test],
[cts/lab/cts],
[cts/lab/cts-log-watcher],
[cts/support/LSBDummy],
[cts/support/cts-support],
[cts/support/fence_dummy],
[cts/support/pacemaker-cts-dummyd],
[doc/abi-check],
[maint/bumplibs],
[tools/cluster-clean],
[tools/cluster-helper],
[tools/cluster-init],
[tools/crm_failcount],
[tools/crm_master],
[tools/crm_report],
[tools/crm_standby],
[tools/cibsecret],
[tools/pcmk_simtimes])
dnl Other files we output
AC_CONFIG_FILES(Makefile \
agents/Makefile \
agents/alerts/Makefile \
agents/ocf/Makefile \
agents/stonith/Makefile \
cts/Makefile \
cts/benchmark/Makefile \
cts/lab/Makefile \
cts/scheduler/Makefile \
cts/scheduler/dot/Makefile \
cts/scheduler/exp/Makefile \
cts/scheduler/scores/Makefile \
cts/scheduler/stderr/Makefile \
cts/scheduler/summary/Makefile \
cts/scheduler/xml/Makefile \
cts/support/Makefile \
cts/support/pacemaker-cts-dummyd@.service \
daemons/Makefile \
daemons/attrd/Makefile \
daemons/based/Makefile \
daemons/controld/Makefile \
daemons/execd/Makefile \
daemons/execd/pacemaker_remote \
daemons/execd/pacemaker_remote.service \
daemons/fenced/Makefile \
daemons/pacemakerd/Makefile \
daemons/pacemakerd/pacemaker.combined.upstart \
daemons/pacemakerd/pacemaker.service \
daemons/pacemakerd/pacemaker.upstart \
daemons/schedulerd/Makefile \
devel/Makefile \
doc/Doxyfile \
doc/Makefile \
doc/sphinx/Makefile \
etc/Makefile \
etc/init.d/pacemaker \
etc/logrotate.d/pacemaker \
etc/sysconfig/pacemaker \
include/Makefile \
include/crm/Makefile \
include/crm/cib/Makefile \
include/crm/common/Makefile \
include/crm/cluster/Makefile \
include/crm/fencing/Makefile \
include/crm/pengine/Makefile \
include/pcmki/Makefile \
lib/Makefile \
lib/cib/Makefile \
lib/cluster/Makefile \
lib/common/Makefile \
lib/common/tests/Makefile \
lib/common/tests/acl/Makefile \
lib/common/tests/agents/Makefile \
lib/common/tests/cmdline/Makefile \
lib/common/tests/flags/Makefile \
lib/common/tests/health/Makefile \
lib/common/tests/io/Makefile \
lib/common/tests/iso8601/Makefile \
lib/common/tests/lists/Makefile \
lib/common/tests/nvpair/Makefile \
lib/common/tests/operations/Makefile \
lib/common/tests/options/Makefile \
lib/common/tests/output/Makefile \
lib/common/tests/procfs/Makefile \
lib/common/tests/results/Makefile \
lib/common/tests/scores/Makefile \
lib/common/tests/strings/Makefile \
lib/common/tests/utils/Makefile \
lib/common/tests/xml/Makefile \
lib/common/tests/xpath/Makefile \
lib/fencing/Makefile \
lib/gnu/Makefile \
lib/libpacemaker.pc \
lib/lrmd/Makefile \
lib/pacemaker/Makefile \
lib/pacemaker.pc \
lib/pacemaker-cib.pc \
lib/pacemaker-cluster.pc \
lib/pacemaker-fencing.pc \
lib/pacemaker-lrmd.pc \
lib/pacemaker-service.pc \
lib/pacemaker-pe_rules.pc \
lib/pacemaker-pe_status.pc \
lib/pengine/Makefile \
lib/pengine/tests/Makefile \
lib/pengine/tests/native/Makefile \
lib/pengine/tests/rules/Makefile \
lib/pengine/tests/status/Makefile \
lib/pengine/tests/unpack/Makefile \
lib/pengine/tests/utils/Makefile \
lib/services/Makefile \
maint/Makefile \
po/Makefile.in \
python/Makefile \
python/setup.py \
python/pacemaker/Makefile \
python/pacemaker/_cts/Makefile \
python/pacemaker/_cts/tests/Makefile \
python/pacemaker/buildoptions.py \
python/tests/Makefile \
replace/Makefile \
rpm/Makefile \
tests/Makefile \
tools/Makefile \
tools/crm_mon.service \
tools/crm_mon.upstart \
tools/report.collector \
tools/report.common \
xml/Makefile \
xml/pacemaker-schemas.pc \
)
dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()
dnl *****************
dnl Configure summary
dnl *****************
AC_MSG_NOTICE([])
AC_MSG_NOTICE([$PACKAGE configuration:])
AC_MSG_NOTICE([ Version = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_NOTICE([ Features = ${PCMK_FEATURES}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([ Prefix = ${prefix}])
AC_MSG_NOTICE([ Executables = ${sbindir}])
AC_MSG_NOTICE([ Man pages = ${mandir}])
AC_MSG_NOTICE([ Libraries = ${libdir}])
AC_MSG_NOTICE([ Header files = ${includedir}])
AC_MSG_NOTICE([ Arch-independent files = ${datadir}])
AC_MSG_NOTICE([ State information = ${localstatedir}])
AC_MSG_NOTICE([ System configuration = ${sysconfdir}])
AC_MSG_NOTICE([ OCF agents = ${OCF_ROOT_DIR}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([ HA group name = ${CRM_DAEMON_GROUP}])
AC_MSG_NOTICE([ HA user name = ${CRM_DAEMON_USER}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([ CFLAGS = ${CFLAGS}])
AC_MSG_NOTICE([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}])
AC_MSG_NOTICE([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}])
AC_MSG_NOTICE([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}])
AC_MSG_NOTICE([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}])
AC_MSG_NOTICE([ Libraries = ${LIBS}])
AC_MSG_NOTICE([ Stack Libraries = ${CLUSTERLIBS}])
AC_MSG_NOTICE([ Unix socket auth method = ${us_auth}])
diff --git a/cts/lab/CIB.py b/cts/lab/CIB.py
deleted file mode 100644
index bc40cd05bd..0000000000
--- a/cts/lab/CIB.py
+++ /dev/null
@@ -1,478 +0,0 @@
-""" CIB generator for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import os
-import warnings
-import tempfile
-
-from pacemaker.buildoptions import BuildOptions
-from pacemaker._cts.CTS import CtsLab
-from pacemaker._cts.cibxml import Alerts, Clone, Expression, FencingTopology, Group, Nodes, OpDefaults, Option, Resource, Rule
-from pacemaker._cts.network import next_ip
-
-
-class ConfigBase(object):
- cts_cib = None
- version = "unknown"
- Factory = None
-
- def __init__(self, CM, factory, tmpfile=None):
- self.CM = CM
- self.Factory = factory
-
- if not tmpfile:
- warnings.filterwarnings("ignore")
- f=tempfile.NamedTemporaryFile(delete=True)
- f.close()
- tmpfile = f.name
- warnings.resetwarnings()
-
- self.Factory.tmpfile = tmpfile
-
- def version(self):
- return self.version
-
-
-class CIB12(ConfigBase):
- version = "pacemaker-1.2"
- counter = 1
-
- def _show(self, command=""):
- output = ""
- (_, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, verbose=1)
- for line in result:
- output += line
- self.Factory.debug("Generated Config: "+line)
- return output
-
- def NewIP(self, name=None, standard="ocf"):
- if self.CM.Env["IPagent"] == "IPaddr2":
- ip = next_ip(self.CM.Env["IPBase"])
- if not name:
- if ":" in ip:
- (prefix, sep, suffix) = ip.rpartition(":")
- name = "r"+suffix
- else:
- name = "r"+ip
-
- r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
- r["ip"] = ip
-
- if ":" in ip:
- r["cidr_netmask"] = "64"
- r["nic"] = "eth0"
- else:
- r["cidr_netmask"] = "32"
-
- else:
- if not name:
- name = "r%s%d" % (self.CM.Env["IPagent"], self.counter)
- self.counter = self.counter + 1
- r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
-
- r.add_op("monitor", "5s")
- return r
-
- def get_node_id(self, node_name):
- """ Check the cluster configuration for a node ID. """
-
- # We can't account for every possible configuration,
- # so we only return a node ID if:
- # * The node is specified in /etc/corosync/corosync.conf
- # with "ring0_addr:" equal to node_name and "nodeid:"
- # explicitly specified.
- # In all other cases, we return 0.
- node_id = 0
-
- # awkward command: use } as record separator
- # so each corosync.conf "object" is one record;
- # match the "node {" record that has "ring0_addr: node_name";
- # then print the substring of that record after "nodeid:"
- (rc, output) = self.Factory.rsh(self.Factory.target,
- r"""awk -v RS="}" """
- r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/"""
- r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s"""
- % (node_name, BuildOptions.COROSYNC_CONFIG_FILE), verbose=1)
-
- if rc == 0 and len(output) == 1:
- try:
- node_id = int(output[0])
- except ValueError:
- node_id = 0
-
- return node_id
-
- def install(self, target):
- old = self.Factory.tmpfile
-
- # Force a rebuild
- self.cts_cib = None
-
- self.Factory.tmpfile = BuildOptions.CIB_DIR + "/cib.xml"
- self.contents(target)
- self.Factory.rsh(self.Factory.target, "chown " + BuildOptions.DAEMON_USER + " " + self.Factory.tmpfile)
-
- self.Factory.tmpfile = old
-
- def contents(self, target=None):
- # fencing resource
- if self.cts_cib:
- return self.cts_cib
-
- if target:
- self.Factory.target = target
-
- self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile))
- self.num_nodes = len(self.CM.Env["nodes"])
-
- no_quorum = "stop"
- if self.num_nodes < 3:
- no_quorum = "ignore"
- self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes)
-
- # We don't need a nodes section unless we add attributes
- stn = None
-
- # Fencing resource
- # Define first so that the shell doesn't reject every update
- if self.CM.Env["DoFencing"]:
-
- # Define the "real" fencing device
- st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith")
-
- # Set a threshold for unreliable stonith devices such as the vmware one
- st.add_meta("migration-threshold", "5")
- st.add_op("monitor", "120s", timeout="120s")
- st.add_op("stop", "0", timeout="60s")
- st.add_op("start", "0", timeout="60s")
-
- # For remote node tests, a cluster node is stopped and brought back up
- # as a remote node with the name "remote-OLDNAME". To allow fencing
- # devices to fence these nodes, create a list of all possible node names.
- all_node_names = [ prefix+n for n in self.CM.Env["nodes"] for prefix in ('', 'remote-') ]
-
- # Add all parameters specified by user
- entries = self.CM.Env["stonith-params"].split(',')
- for entry in entries:
- try:
- (name, value) = entry.split('=', 1)
- except ValueError:
- print("Warning: skipping invalid fencing parameter: %s" % entry)
- continue
-
- # Allow user to specify "all" as the node list, and expand it here
- if name in [ "hostlist", "pcmk_host_list" ] and value == "all":
- value = ' '.join(all_node_names)
-
- st[name] = value
-
- st.commit()
-
- # Test advanced fencing logic
- if True:
- stf_nodes = []
- stt_nodes = []
- attr_nodes = {}
-
- # Create the levels
- stl = FencingTopology(self.Factory)
- for node in self.CM.Env["nodes"]:
- # Remote node tests will rename the node
- remote_node = "remote-" + node
-
- # Randomly assign node to a fencing method
- ftype = self.CM.Env.random_gen.choice(["levels-and", "levels-or ", "broadcast "])
-
- # For levels-and, randomly choose targeting by node name or attribute
- by = ""
- if ftype == "levels-and":
- node_id = self.get_node_id(node)
- if node_id == 0 or self.CM.Env.random_gen.choice([True, False]):
- by = " (by name)"
- else:
- attr_nodes[node] = node_id
- by = " (by attribute)"
-
- self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
-
- if ftype == "levels-and":
- # If targeting by name, add a topology level for this node
- if node not in attr_nodes:
- stl.level(1, node, "FencingPass,Fencing")
-
- # Always target remote nodes by name, otherwise we would need to add
- # an attribute to the remote node only during remote tests (we don't
- # want nonexistent remote nodes showing up in the non-remote tests).
- # That complexity is not worth the effort.
- stl.level(1, remote_node, "FencingPass,Fencing")
-
- # Add the node (and its remote equivalent) to the list of levels-and nodes.
- stt_nodes.extend([node, remote_node])
-
- elif ftype == "levels-or ":
- for n in [ node, remote_node ]:
- stl.level(1, n, "FencingFail")
- stl.level(2, n, "Fencing")
- stf_nodes.extend([node, remote_node])
-
- # If any levels-and nodes were targeted by attribute,
- # create the attributes and a level for the attribute.
- if attr_nodes:
- stn = Nodes(self.Factory)
- for (node_name, node_id) in list(attr_nodes.items()):
- stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" })
- stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
-
- # Create a Dummy agent that always passes for levels-and
- if len(stt_nodes):
- stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith")
- stt["pcmk_host_list"] = " ".join(stt_nodes)
- # Wait this many seconds before doing anything, handy for letting disks get flushed too
- stt["random_sleep_range"] = "30"
- stt["mode"] = "pass"
- stt.commit()
-
- # Create a Dummy agent that always fails for levels-or
- if len(stf_nodes):
- stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith")
- stf["pcmk_host_list"] = " ".join(stf_nodes)
- # Wait this many seconds before doing anything, handy for letting disks get flushed too
- stf["random_sleep_range"] = "30"
- stf["mode"] = "fail"
- stf.commit()
-
- # Now commit the levels themselves
- stl.commit()
-
- o = Option(self.Factory)
- o["stonith-enabled"] = self.CM.Env["DoFencing"]
- o["start-failure-is-fatal"] = "false"
- o["pe-input-series-max"] = "5000"
- o["shutdown-escalation"] = "5min"
- o["batch-limit"] = "10"
- o["dc-deadtime"] = "5s"
- o["no-quorum-policy"] = no_quorum
-
- o.commit()
-
- o = OpDefaults(self.Factory)
- o["timeout"] = "90s"
- o.commit()
-
- # Commit the nodes section if we defined one
- if stn is not None:
- stn.commit()
-
- # Add an alerts section if possible
- if self.Factory.rsh.exists_on_all(self.CM.Env["notification-agent"], self.CM.Env["nodes"]):
- alerts = Alerts(self.Factory)
- alerts.add_alert(self.CM.Env["notification-agent"],
- self.CM.Env["notification-recipient"])
- alerts.commit()
-
- # Add resources?
- if self.CM.Env["CIBResource"]:
- self.add_resources()
-
- if self.CM.cluster_monitor == 1:
- mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker")
- mon.add_op("start", "0", requires="nothing")
- mon.add_op("monitor", "5s", requires="nothing")
- mon["update"] = "10"
- mon["extra_options"] = "-r -n"
- mon["user"] = "abeekhof"
- mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html"
- mon.commit()
-
- #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')
-
- # generate cib
- self.cts_cib = self._show()
-
- if self.Factory.tmpfile != BuildOptions.CIB_DIR + "/cib.xml":
- self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile)
-
- return self.cts_cib
-
- def add_resources(self):
- # Per-node resources
- for node in self.CM.Env["nodes"]:
- name = "rsc_"+node
- r = self.NewIP(name)
- r.prefer(node, "100")
- r.commit()
-
- # Migrator
- # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
- m = Resource(self.Factory, "migrator","Dummy", "ocf", "pacemaker")
- m["passwd"] = "whatever"
- m.add_meta("resource-stickiness","1")
- m.add_meta("allow-migrate", "1")
- m.add_op("monitor", "P10S")
- m.commit()
-
- # Ping the test exerciser
- p = Resource(self.Factory, "ping-1","ping", "ocf", "pacemaker")
- p.add_op("monitor", "60s")
- p["host_list"] = self.CM.Env["cts-exerciser"]
- p["name"] = "connected"
- p["debug"] = "true"
-
- c = Clone(self.Factory, "Connectivity", p)
- c["globally-unique"] = "false"
- c.commit()
-
- # promotable clone resource
- s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker")
- s.add_op("monitor", "15s", timeout="60s")
- s.add_op("monitor", "16s", timeout="60s", role="Promoted")
- ms = Clone(self.Factory, "promotable-1", s)
- ms["promotable"] = "true"
- ms["clone-max"] = self.num_nodes
- ms["clone-node-max"] = 1
- ms["promoted-max"] = 1
- ms["promoted-node-max"] = 1
-
- # Require connectivity to run the promotable clone
- r = Rule(self.Factory, "connected", "-INFINITY", op="or")
- r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1"))
- r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None))
- ms.prefer("connected", rule=r)
-
- ms.commit()
-
- # Group Resource
- g = Group(self.Factory, "group-1")
- g.add_child(self.NewIP())
-
- if self.CM.Env["have_systemd"]:
- sysd = Resource(self.Factory, "petulant",
- "pacemaker-cts-dummyd@10", "service")
- sysd.add_op("monitor", "P10S")
- g.add_child(sysd)
- else:
- g.add_child(self.NewIP())
-
- g.add_child(self.NewIP())
-
- # Make group depend on the promotable clone
- g.after("promotable-1", first="promote", then="start")
- g.colocate("promotable-1", "INFINITY", withrole="Promoted")
-
- g.commit()
-
- # LSB resource
- lsb = Resource(self.Factory, "lsb-dummy", "LSBDummy", "lsb")
- lsb.add_op("monitor", "5s")
-
- # LSB with group
- lsb.after("group-1")
- lsb.colocate("group-1")
-
- lsb.commit()
-
-
-class CIB20(CIB12):
- version = "pacemaker-2.5"
-
-class CIB30(CIB12):
- version = "pacemaker-3.7"
-
-#class HASI(CIB10):
-# def add_resources(self):
-# # DLM resource
-# self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
-# self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')
-
- # O2CB resource
-# self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
-# self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
-# self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
-# self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')
-
-
-class ConfigFactory(object):
- def __init__(self, CM):
- self.CM = CM
- self.rsh = self.CM.rsh
- self.register("pacemaker12", CIB12, CM, self)
- self.register("pacemaker20", CIB20, CM, self)
- self.register("pacemaker30", CIB30, CM, self)
- if not self.CM.Env["ListTests"]:
- self.target = self.CM.Env["nodes"][0]
- self.tmpfile = None
-
- def log(self, args):
- self.CM.log("cib: %s" % args)
-
- def debug(self, args):
- self.CM.debug("cib: %s" % args)
-
- def register(self, methodName, constructor, *args, **kargs):
- """register a constructor"""
- _args = [constructor]
- _args.extend(args)
- setattr(self, methodName, ConfigFactoryItem(*_args, **kargs))
-
- def unregister(self, methodName):
- """unregister a constructor"""
- delattr(self, methodName)
-
- def createConfig(self, name="pacemaker-1.0"):
- if name == "pacemaker-1.0":
- name = "pacemaker10";
- elif name == "pacemaker-1.2":
- name = "pacemaker12";
- elif name == "pacemaker-2.0":
- name = "pacemaker20";
- elif name.startswith("pacemaker-3."):
- name = "pacemaker30";
-
- if hasattr(self, name):
- return getattr(self, name)()
- else:
- self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name)
-
- return self.pacemaker30()
-
-
-class ConfigFactoryItem(object):
- def __init__(self, function, *args, **kargs):
- self._function = function
- self._args = args
- self._kargs = kargs
-
- def __call__(self, *args, **kargs):
- """call function"""
- _args = list(self._args)
- _args.extend(args)
- _kargs = self._kargs.copy()
- _kargs.update(kargs)
- return self._function(*_args,**_kargs)
-
-if __name__ == '__main__':
- """ Unit test (pass cluster node names as command line arguments) """
-
- import cts.CM_corosync
- import sys
-
- if len(sys.argv) < 2:
- print("Usage: %s <node> ..." % sys.argv[0])
- sys.exit(1)
-
- args = [
- "--nodes", " ".join(sys.argv[1:]),
- "--clobber-cib",
- "--populate-resources",
- "--stack", "corosync",
- "--test-ip-base", "fe80::1234:56:7890:1000",
- "--stonith", "rhcs",
- ]
- env = CtsLab(args)
- cm = CM_corosync.crm_corosync()
- CibFactory = ConfigFactory(cm)
- cib = CibFactory.createConfig("pacemaker-3.0")
- print(cib.contents())
diff --git a/cts/lab/ClusterManager.py b/cts/lab/ClusterManager.py
index 4ffaa9de0a..4f199fc694 100644
--- a/cts/lab/ClusterManager.py
+++ b/cts/lab/ClusterManager.py
@@ -1,937 +1,935 @@
""" ClusterManager class for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = """Copyright 2000-2023 the Pacemaker project contributors.
Certain portions by Huang Zhen <zhenhltc@cn.ibm.com> are copyright 2004
International Business Machines. The version control history for this file
may have further details."""
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import re
import time
from collections import UserDict
-from cts.CIB import ConfigFactory
-
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.CTS import NodeStatus, Process
from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.cib import ConfigFactory
from pacemaker._cts.environment import EnvFactory
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.patterns import PatternSelector
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogWatcher
class ClusterManager(UserDict):
'''The Cluster Manager class.
This is a subclass of the Python dictionary class.
(This is because it contains lots of {name,value} pairs,
not because its behavior is all that similar to a
dictionary in other ways.)
This is an abstract class that implements high-level
operations on the cluster and/or its cluster managers.
Actual cluster manager classes are subclasses of this type.
One of the things we do is track the state we think every node should
be in.
'''
def __InitialConditions(self):
#if os.geteuid() != 0:
# raise ValueError("Must Be Root!")
None
def _finalConditions(self):
for key in list(self.keys()):
if self[key] == None:
raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.")
def __init__(self):
self.Env = EnvFactory().getInstance()
self.templates = PatternSelector(self.Env["Name"])
self.__InitialConditions()
self.logger = LogFactory()
self.TestLoggingLevel=0
self.data = {}
self.name = self.Env["Name"]
self.rsh = RemoteFactory().getInstance()
self.ShouldBeStatus={}
self.ns = NodeStatus(self.Env)
self.OurNode = os.uname()[1].lower()
self.__instance_errorstoignore = []
self.cib_installed = 0
self.config = None
- self.cluster_monitor = 0
self.use_short_names = 1
self._finalConditions()
self.check_transitions = 0
self.check_elections = 0
self.CIBsync = {}
self.CibFactory = ConfigFactory(self)
- self.cib = self.CibFactory.createConfig(self.Env["Schema"])
+ self.cib = self.CibFactory.create_config(self.Env["Schema"])
def __getitem__(self, key):
if key == "Name":
return self.name
print("FIXME: Getting %s from %s" % (key, repr(self)))
if key in self.data:
return self.data[key]
return self.templates.get_patterns(key)
def __setitem__(self, key, value):
print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
self.data[key] = value
def key_for_node(self, node):
return node
def instance_errorstoignore_clear(self):
'''Allows the test scenario to reset instance errors to ignore on each iteration.'''
self.__instance_errorstoignore = []
def instance_errorstoignore(self):
'''Return list of errors which are 'normal' for a specific test instance'''
return self.__instance_errorstoignore
def log(self, args):
self.logger.log(args)
def debug(self, args):
self.logger.debug(args)
def upcount(self):
'''How many nodes are up?'''
count = 0
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
count = count + 1
return count
def install_support(self, command="install"):
for node in self.Env["nodes"]:
self.rsh(node, BuildOptions.DAEMON_DIR + "/cts-support " + command)
def prepare_fencing_watcher(self, name):
# If we don't have quorum now but get it as a result of starting this node,
# then a bunch of nodes might get fenced
upnode = None
if self.HasQuorum(None):
self.debug("Have quorum")
return None
if not self.templates["Pat:Fencing_start"]:
print("No start pattern")
return None
if not self.templates["Pat:Fencing_ok"]:
print("No ok pattern")
return None
stonith = None
stonithPats = []
for peer in self.Env["nodes"]:
if self.ShouldBeStatus[peer] != "up":
stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
stonith = LogWatcher(self.Env["LogFileName"], stonithPats, self.Env["nodes"], self.Env["LogWatcher"], "StartupFencing", 0)
stonith.set_watch()
return stonith
def fencing_cleanup(self, node, stonith):
peer_list = []
peer_state = {}
self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
# If we just started a node, we may now have quorum (and permission to fence)
if not stonith:
self.debug("Nothing to do")
return peer_list
q = self.HasQuorum(None)
if not q and len(self.Env["nodes"]) > 2:
# We didn't gain quorum - we shouldn't have shot anyone
self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
return peer_list
for n in self.Env["nodes"]:
peer_state[n] = "unknown"
# Now see if any states need to be updated
self.debug("looking for: " + repr(stonith.regexes))
shot = stonith.look(0)
while shot:
line = repr(shot)
self.debug("Found: " + line)
del stonith.regexes[stonith.whichmatch]
# Extract node name
for n in self.Env["nodes"]:
if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
peer = n
peer_state[peer] = "complete"
self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)
elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
# TODO: Correctly detect multiple fencing operations for the same host
peer = n
peer_state[peer] = "in-progress"
self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)
if not peer:
self.logger.log("ERROR: Unknown stonith match: %s" % line)
elif not peer in peer_list:
self.debug("Found peer: " + peer)
peer_list.append(peer)
# Get the next one
shot = stonith.look(60)
for peer in peer_list:
self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
if self.Env["at-boot"]:
self.ShouldBeStatus[peer] = "up"
else:
self.ShouldBeStatus[peer] = "down"
if peer_state[peer] == "in-progress":
# Wait for any in-progress operations to complete
shot = stonith.look(60)
while len(stonith.regexes) and shot:
line = repr(shot)
self.debug("Found: " + line)
del stonith.regexes[stonith.whichmatch]
shot = stonith.look(60)
# Now make sure the node is alive too
self.ns.wait_for_node(peer, self.Env["DeadTime"])
# Poll until it comes up
if self.Env["at-boot"]:
if not self.StataCM(peer):
time.sleep(self.Env["StartTime"])
if not self.StataCM(peer):
self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
return None
return peer_list
def StartaCM(self, node, verbose=False):
'''Start up the cluster manager on a given node'''
if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
ret = 1
if not node in self.ShouldBeStatus:
self.ShouldBeStatus[node] = "down"
if self.ShouldBeStatus[node] != "down":
return 1
patterns = []
# Technically we should always be able to notice ourselves starting
patterns.append(self.templates["Pat:Local_started"] % node)
if self.upcount() == 0:
patterns.append(self.templates["Pat:DC_started"] % node)
else:
patterns.append(self.templates["Pat:NonDC_started"] % node)
watch = LogWatcher(
self.Env["LogFileName"], patterns, self.Env["nodes"], self.Env["LogWatcher"], "StartaCM", self.Env["StartTime"]+10)
self.install_config(node)
self.ShouldBeStatus[node] = "any"
if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
self.logger.log ("%s was already started" % (node))
return 1
stonith = self.prepare_fencing_watcher(node)
watch.set_watch()
(rc, _) = self.rsh(node, self.templates["StartCmd"])
if rc != 0:
self.logger.log ("Warn: Start command failed on node %s" % (node))
self.fencing_cleanup(node, stonith)
return None
self.ShouldBeStatus[node] = "up"
watch_result = watch.look_for_all()
if watch.unmatched:
for regex in watch.unmatched:
self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
if watch_result and self.cluster_stable(self.Env["DeadTime"]):
#self.debug("Found match: "+ repr(watch_result))
self.fencing_cleanup(node, stonith)
return 1
elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
self.fencing_cleanup(node, stonith)
return 1
self.logger.log ("Warn: Start failed for node %s" % (node))
return None
def StartaCMnoBlock(self, node, verbose=False):
'''Start up the cluster manager on a given node in non-blocking mode'''
if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
else: self.debug("Starting %s on node %s" % (self["Name"], node))
self.install_config(node)
self.rsh(node, self.templates["StartCmd"], synchronous=False)
self.ShouldBeStatus[node] = "up"
return 1
def StopaCM(self, node, verbose=False, force=False):
'''Stop the cluster manager on a given node'''
if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
else: self.debug("Stopping %s on node %s" % (self["Name"], node))
if self.ShouldBeStatus[node] != "up" and force == False:
return 1
(rc, _) = self.rsh(node, self.templates["StopCmd"])
if rc == 0:
# Make sure we can continue even if corosync leaks
# fdata-* is the old name
#self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*")
self.ShouldBeStatus[node] = "down"
self.cluster_stable(self.Env["DeadTime"])
return 1
else:
self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))
return None
def StopaCMnoBlock(self, node):
'''Stop the cluster manager on a given node in non-blocking mode'''
self.debug("Stopping %s on node %s" % (self["Name"], node))
self.rsh(node, self.templates["StopCmd"], synchronous=False)
self.ShouldBeStatus[node] = "down"
return 1
def RereadCM(self, node):
'''Force the cluster manager on a given node to reread its config
This may be a no-op on certain cluster managers.
'''
(rc, _) = self.rsh(node, self.templates["RereadCmd"])
if rc == 0:
return 1
else:
self.logger.log ("Could not force %s on node %s to reread its config"
% (self["Name"], node))
return None
def startall(self, nodelist=None, verbose=False, quick=False):
'''Start the cluster manager on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
map = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in nodelist:
if self.ShouldBeStatus[node] == "down":
self.ns.wait_for_all_nodes(nodelist, 300)
if not quick:
# This is used for "basic sanity checks", so only start one node ...
if not self.StartaCM(node, verbose=verbose):
return 0
return 1
# Approximation of SimulStartList for --boot
watchpats = [ ]
watchpats.append(self.templates["Pat:DC_IDLE"])
for node in nodelist:
watchpats.append(self.templates["Pat:InfraUp"] % node)
watchpats.append(self.templates["Pat:PacemakerUp"] % node)
watchpats.append(self.templates["Pat:Local_started"] % node)
watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node))
# Start all the nodes - at about the same time...
watch = LogWatcher(self.Env["LogFileName"], watchpats, self.Env["nodes"], self.Env["LogWatcher"], "fast-start", self.Env["DeadTime"]+10)
watch.set_watch()
if not self.StartaCM(nodelist[0], verbose=verbose):
return 0
for node in nodelist:
self.StartaCMnoBlock(node, verbose=verbose)
watch.look_for_all()
if watch.unmatched:
for regex in watch.unmatched:
self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
if not self.cluster_stable():
self.logger.log("Cluster did not stabilize")
return 0
return 1
def stopall(self, nodelist=None, verbose=False, force=False):
'''Stop the cluster managers on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
ret = 1
map = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up" or force == True:
if not self.StopaCM(node, verbose=verbose, force=force):
ret = 0
return ret
def rereadall(self, nodelist=None):
'''Force the cluster managers on every node in the cluster
to reread their config files. We can do it on a subset of the
cluster if nodelist is not None.
'''
map = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
self.RereadCM(node)
def statall(self, nodelist=None):
'''Return the status of the cluster managers in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
result = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in nodelist:
if self.StataCM(node):
result[node] = "up"
else:
result[node] = "down"
return result
def isolate_node(self, target, nodes=None):
'''isolate the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
(rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
if rc != 0:
self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
return None
else:
self.debug("Communication cut between %s and %s" % (target, node))
return 1
def unisolate_node(self, target, nodes=None):
'''fix the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
restored = 0
# Limit the amount of time we have asynchronous connectivity for
# Restore both sides as simultaneously as possible
self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=False)
self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=False)
self.debug("Communication restored between %s and %s" % (target, node))
def oprofileStart(self, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileStart(n)
elif node in self.Env["oprofile"]:
self.debug("Enabling oprofile on %s" % node)
self.rsh(node, "opcontrol --init")
self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
self.rsh(node, "opcontrol --start")
self.rsh(node, "opcontrol --reset")
def oprofileSave(self, test, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileSave(test, n)
elif node in self.Env["oprofile"]:
self.rsh(node, "opcontrol --dump")
self.rsh(node, "opcontrol --save=cts.%d" % test)
# Read back with: opreport -l session:cts.0 image:<directory>/c*
if None:
self.rsh(node, "opcontrol --reset")
else:
self.oprofileStop(node)
self.oprofileStart(node)
def oprofileStop(self, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileStop(n)
elif node in self.Env["oprofile"]:
self.debug("Stopping oprofile on %s" % node)
self.rsh(node, "opcontrol --reset")
self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
def errorstoignore(self):
# At some point implement a more elegant solution that
# also produces a report at the end
""" Return a list of known error messages that should be ignored """
return self.templates.get_patterns("BadNewsIgnore")
def install_config(self, node):
if not self.ns.wait_for_node(node):
self.log("Node %s is not up." % node)
return None
if not node in self.CIBsync and self.Env["ClobberCIB"]:
self.CIBsync[node] = 1
self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*")
# Only install the CIB on the first node, all the other ones will pick it up from there
if self.cib_installed == 1:
return None
self.cib_installed = 1
if self.Env["CIBfilename"] == None:
self.log("Installing Generated CIB on node %s" % (node))
self.cib.install(node)
else:
self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node))
if self.rsh.copy(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)) != 0:
raise ValueError("Cannot scp file to %s" % node)
self.rsh(node, "chown " + BuildOptions.DAEMON_USER + " " + BuildOptions.CIB_DIR + "/cib.xml")
def prepare(self):
'''Finish the Initialization process. Prepare to test...'''
self.partitions_expected = 1
for node in self.Env["nodes"]:
self.ShouldBeStatus[node] = ""
if self.Env["experimental-tests"]:
self.unisolate_node(node)
self.StataCM(node)
def test_node_CM(self, node):
'''Report the status of the cluster manager on a given node'''
watchpats = [ ]
watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
watchpats.append(self.templates["Pat:NonDC_started"] % node)
watchpats.append(self.templates["Pat:DC_started"] % node)
idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, [node], self.Env["LogWatcher"], "ClusterIdle")
idle_watch.set_watch()
(_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1)
if not out:
out = ""
else:
out = out[0].strip()
self.debug("Node %s status: '%s'" %(node, out))
if out.find('ok') < 0:
if self.ShouldBeStatus[node] == "up":
self.log(
"Node status for %s is %s but we think it should be %s"
% (node, "down", self.ShouldBeStatus[node]))
self.ShouldBeStatus[node] = "down"
return 0
if self.ShouldBeStatus[node] == "down":
self.log(
"Node status for %s is %s but we think it should be %s: %s"
% (node, "up", self.ShouldBeStatus[node], out))
self.ShouldBeStatus[node] = "up"
# check the output first - because syslog-ng loses messages
if out.find('S_NOT_DC') != -1:
# Up and stable
return 2
if out.find('S_IDLE') != -1:
# Up and stable
return 2
# fall back to syslog-ng and wait
if not idle_watch.look():
# just up
self.debug("Warn: Node %s is unstable: %s" % (node, out))
return 1
# Up and stable
return 2
# Is the node up, or is it down?
def StataCM(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) > 0:
return 1
return None
# Being up and being stable are not the same question...
def node_stable(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) == 2:
return 1
self.log("Warn: Node %s not stable" % (node))
return None
def partition_stable(self, nodes, timeout=None):
watchpats = [ ]
watchpats.append("Current ping state: S_IDLE")
watchpats.append(self.templates["Pat:DC_IDLE"])
self.debug("Waiting for cluster stability...")
if timeout == None:
timeout = self.Env["DeadTime"]
if len(nodes) < 3:
self.debug("Cluster is inactive")
return 1
idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, nodes.split(), self.Env["LogWatcher"], "ClusterStable", timeout)
idle_watch.set_watch()
for node in nodes.split():
# have each node dump its current state
self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
ret = idle_watch.look()
while ret:
self.debug(ret)
for node in nodes.split():
if re.search(node, ret):
return 1
ret = idle_watch.look()
self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout))
return None
def cluster_stable(self, timeout=None, double_check=False):
partitions = self.find_partitions()
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
if double_check:
# Make sure we are really stable and that all resources,
# including those that depend on transient node attributes,
# are started if they were going to be
time.sleep(5)
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
return 1
def is_node_dc(self, node, status_line=None):
rc = 0
if not status_line:
(_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1)
if out:
status_line = out[0].strip()
if not status_line:
rc = 0
elif status_line.find('S_IDLE') != -1:
rc = 1
elif status_line.find('S_INTEGRATION') != -1:
rc = 1
elif status_line.find('S_FINALIZE_JOIN') != -1:
rc = 1
elif status_line.find('S_POLICY_ENGINE') != -1:
rc = 1
elif status_line.find('S_TRANSITION_ENGINE') != -1:
rc = 1
return rc
def active_resources(self, node):
(_, output) = self.rsh(node, "crm_resource -c", verbose=1)
resources = []
for line in output:
if re.search("^Resource", line):
tmp = AuditResource(self, line)
if tmp.type == "primitive" and tmp.host == node:
resources.append(tmp.id)
return resources
def ResourceLocation(self, rid):
ResourceNodes = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
cmd = self.templates["RscRunning"] % (rid)
(rc, lines) = self.rsh(node, cmd)
if rc == 127:
self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
for line in lines:
self.log("Output: "+line)
elif rc == 0:
ResourceNodes.append(node)
return ResourceNodes
def find_partitions(self):
ccm_partitions = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
(_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1)
if not out:
self.log("no partition details for %s" % node)
continue
partition = out[0].strip()
if len(partition) > 2:
nodes = partition.split()
nodes.sort()
partition = ' '.join(nodes)
found = 0
for a_partition in ccm_partitions:
if partition == a_partition:
found = 1
if found == 0:
self.debug("Adding partition from %s: %s" % (node, partition))
ccm_partitions.append(partition)
else:
self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
else:
self.log("bad partition details for %s" % node)
else:
self.debug("Node %s is down... skipping" % node)
self.debug("Found partitions: %s" % repr(ccm_partitions) )
return ccm_partitions
def HasQuorum(self, node_list):
# If we are auditing a partition, then one side will
# have quorum and the other will not,
# so the caller needs to tell us which side we are checking.
# If no value for node_list is specified, assume all nodes.
if not node_list:
node_list = self.Env["nodes"]
for node in node_list:
if self.ShouldBeStatus[node] == "up":
(_, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1)
quorum = quorum[0].strip()
if quorum.find("1") != -1:
return 1
elif quorum.find("0") != -1:
return 0
else:
self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum)
return 0
def Components(self):
complist = []
common_ignore = [
"Pending action:",
"(ERROR|error): crm_log_message_adv:",
"(ERROR|error): MSG: No message to dump",
"pending LRM operations at shutdown",
"Lost connection to the CIB manager",
"Connection to the CIB terminated...",
"Sending message to the CIB manager FAILED",
"Action A_RECOVER .* not supported",
"(ERROR|error): stonithd_op_result_ready: not signed on",
"pingd.*(ERROR|error): send_update: Could not send update",
"send_ipc_message: IPC Channel to .* is not connected",
"unconfirmed_actions: Waiting on .* unconfirmed actions",
"cib_native_msgready: Message pending on command channel",
r": Performing A_EXIT_1 - forcefully exiting ",
r"Resource .* was active at shutdown. You may ignore this error if it is unmanaged.",
]
stonith_ignore = [
r"Updating failcount for child_DoFencing",
r"error.*: Fencer connection failed \(will retry\)",
"pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.",
]
stonith_ignore.extend(common_ignore)
ccm = Process(self, "ccm", pats = [
"State transition .* S_RECOVERY",
"pacemaker-controld.*Action A_RECOVER .* not supported",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*: Could not recover from internal error",
"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
# these status numbers are likely wrong now
r"pacemaker-controld.*exited with status 2",
r"attrd.*exited with status 1",
r"cib.*exited with status 2",
# Not if it was fenced
# "A new node joined the cluster",
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling node .* for fencing",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
# "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
# "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
based = Process(self, "pacemaker-based", pats = [
"State transition .* S_RECOVERY",
"Lost connection to the CIB manager",
"Connection to the CIB manager terminated",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.*: Could not recover from internal error",
# these status numbers are likely wrong now
r"pacemaker-controld.*exited with status 2",
r"attrd.*exited with status 1",
], badnews_ignore = common_ignore)
execd = Process(self, "pacemaker-execd", pats = [
"State transition .* S_RECOVERY",
"LRM Connection failed",
"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
"State transition S_STARTING -> S_PENDING",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*: Could not recover from internal error",
# this status number is likely wrong now
r"pacemaker-controld.*exited with status 2",
], badnews_ignore = common_ignore)
controld = Process(self, "pacemaker-controld",
pats = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling node .* for fencing",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
"State transition .* S_IDLE",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
schedulerd = Process(self, "pacemaker-schedulerd", pats = [
"State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*: Could not recover from internal error",
r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
# this status number is likely wrong now
r"pacemaker-controld.*exited with status 2",
], badnews_ignore = common_ignore, dc_only=True)
if self.Env["DoFencing"]:
complist.append(Process(self, "stoniths", dc_pats = [
r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed",
"Attempting connection to fencing daemon",
], badnews_ignore = stonith_ignore))
ccm.pats.extend([
# these status numbers are likely wrong now
r"attrd.*exited with status 1",
r"pacemaker-(based|controld).*exited with status 2",
])
based.pats.extend([
# these status numbers are likely wrong now
r"attrd.*exited with status 1",
r"pacemaker-controld.*exited with status 2",
])
execd.pats.extend([
# these status numbers are likely wrong now
r"pacemaker-controld.*exited with status 2",
])
complist.append(ccm)
complist.append(based)
complist.append(execd)
complist.append(controld)
complist.append(schedulerd)
return complist
def StandbyStatus(self, node):
(_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
if not out:
return "off"
out = out[0].strip()
self.debug("Standby result: "+out)
return out
# status == "on" : Enter Standby mode
# status == "off": Enter Active mode
def SetStandbyMode(self, node, status):
current_status = self.StandbyStatus(node)
cmd = self.templates["StandbyCmd"] % (node, status)
self.rsh(node, cmd)
return True
def AddDummyRsc(self, node, rid):
rsc_xml = """ '<resources>
<primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
<operations>
<op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/
</operations>
</primitive>
</resources>'""" % (rid, rid)
constraint_xml = """ '<constraints>
<rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
</constraints>'
""" % (rid, node, node, rid)
self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))
def RemoveDummyRsc(self, node, rid):
constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
rsc = "\"//primitive[@id='%s']\"" % (rid)
self.rsh(node, self.templates['CibDelXpath'] % constraint)
self.rsh(node, self.templates['CibDelXpath'] % rsc)
diff --git a/cts/lab/Makefile.am b/cts/lab/Makefile.am
index 14e3248fb3..0f4f5721fd 100644
--- a/cts/lab/Makefile.am
+++ b/cts/lab/Makefile.am
@@ -1,27 +1,26 @@
#
# Copyright 2001-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
MAINTAINERCLEANFILES = Makefile.in
noinst_SCRIPTS = cluster_test \
OCFIPraTest.py
# Commands intended to be run only via other commands
halibdir = $(CRM_DAEMON_DIR)
dist_halib_SCRIPTS = cts-log-watcher
ctslibdir = $(pythondir)/cts
ctslib_PYTHON = __init__.py \
- CIB.py \
ClusterManager.py \
CM_corosync.py
ctsdir = $(datadir)/$(PACKAGE)/tests/cts
cts_SCRIPTS = CTSlab.py \
cts
diff --git a/python/pacemaker/_cts/Makefile.am b/python/pacemaker/_cts/Makefile.am
index f1baaf66ef..0a94f75bb7 100644
--- a/python/pacemaker/_cts/Makefile.am
+++ b/python/pacemaker/_cts/Makefile.am
@@ -1,32 +1,33 @@
#
# Copyright 2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
MAINTAINERCLEANFILES = Makefile.in
pkgpythondir = $(pythondir)/$(PACKAGE)/_cts
pkgpython_PYTHON = CTS.py \
__init__.py \
audits.py \
+ cib.py \
cibxml.py \
corosync.py \
environment.py \
errors.py \
input.py \
logging.py \
network.py \
patterns.py \
process.py \
remote.py \
scenarios.py \
test.py \
timer.py \
watcher.py
SUBDIRS = tests
diff --git a/python/pacemaker/_cts/cib.py b/python/pacemaker/_cts/cib.py
new file mode 100644
index 0000000000..f272e4707c
--- /dev/null
+++ b/python/pacemaker/_cts/cib.py
@@ -0,0 +1,424 @@
+""" CIB generator for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = ["ConfigFactory"]
+__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import warnings
+import tempfile
+
+from pacemaker.buildoptions import BuildOptions
+from pacemaker._cts.cibxml import Alerts, Clone, Expression, FencingTopology, Group, Nodes, OpDefaults, Option, Resource, Rule
+from pacemaker._cts.network import next_ip
+
+
+class CIB:
+ """ A class for generating, representing, and installing a CIB file onto
+ cluster nodes
+ """
+
+ def __init__(self, cm, version, factory, tmpfile=None):
+ """ Create a new CIB instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ version -- The schema syntax version
+ factory -- A ConfigFactory instance
+ tmpfile -- Where to store the CIB, or None to use a new tempfile
+ """
+
+ # pylint: disable=invalid-name
+ self._cib = None
+ self._cm = cm
+ self._counter = 1
+ self._factory = factory
+ self._num_nodes = 0
+
+ self.version = version
+
+ if not tmpfile:
+ warnings.filterwarnings("ignore")
+
+ # pylint: disable=consider-using-with
+ f = tempfile.NamedTemporaryFile(delete=True)
+ f.close()
+ tmpfile = f.name
+
+ warnings.resetwarnings()
+
+ self._factory.tmpfile = tmpfile
+
+ def _show(self):
+ """ Query a cluster node for its generated CIB; log and return the result """
+
+ output = ""
+ (_, result) = self._factory.rsh(self._factory.target, "HOME=/root CIB_file=%s cibadmin -Ql" % self._factory.tmpfile, verbose=1)
+
+ for line in result:
+ output += line
+ self._factory.debug("Generated Config: %s" % line)
+
+ return output
+
+ def new_ip(self, name=None):
+ """ Generate an IP resource for the next available IP address, optionally
+ specifying the resource's name.
+ """
+
+ if self._cm.Env["IPagent"] == "IPaddr2":
+ ip = next_ip(self._cm.Env["IPBase"])
+ if not name:
+ if ":" in ip:
+ (_, _, suffix) = ip.rpartition(":")
+ name = "r%s" % suffix
+ else:
+ name = "r%s" % ip
+
+ r = Resource(self._factory, name, self._cm.Env["IPagent"], "ocf")
+ r["ip"] = ip
+
+ if ":" in ip:
+ r["cidr_netmask"] = "64"
+ r["nic"] = "eth0"
+ else:
+ r["cidr_netmask"] = "32"
+
+ else:
+ if not name:
+ name = "r%s%d" % (self._cm.Env["IPagent"], self._counter)
+ self._counter += 1
+
+ r = Resource(self._factory, name, self._cm.Env["IPagent"], "ocf")
+
+ r.add_op("monitor", "5s")
+ return r
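+ # Naming sketch for the IPaddr2 branch above (example addresses, not taken
+ # from any test environment): with next_ip() returning "10.1.1.5", the
+ # resource is named "r10.1.1.5"; for an IPv6 address like "fe80::1234:5678",
+ # rpartition(":") keeps only the text after the last colon, giving "r5678".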
+
+ def get_node_id(self, node_name):
+ """ Check the cluster configuration for the node ID for the given node_name """
+
+ # We can't account for every possible configuration,
+ # so we only return a node ID if:
+ # * The node is specified in /etc/corosync/corosync.conf
+ # with "ring0_addr:" equal to node_name and "nodeid:"
+ # explicitly specified.
+ # In all other cases, we return 0.
+ node_id = 0
+
+ # awkward command: use } as record separator
+ # so each corosync.conf "object" is one record;
+ # match the "node {" record that has "ring0_addr: node_name";
+ # then print the substring of that record after "nodeid:"
+ (rc, output) = self._factory.rsh(self._factory.target,
+ r"""awk -v RS="}" """
+ r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/"""
+ r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s"""
+ % (node_name, BuildOptions.COROSYNC_CONFIG_FILE), verbose=1)
+
+ if rc == 0 and len(output) == 1:
+ try:
+ node_id = int(output[0])
+ except ValueError:
+ node_id = 0
+
+ return node_id
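+ # Worked example for the awk pipeline above (hypothetical corosync.conf
+ # contents):
+ #
+ # nodelist {
+ # node {
+ # ring0_addr: node1
+ # nodeid: 101
+ # }
+ # }
+ #
+ # Splitting on "}" makes each node block a single record; the record
+ # matching "node1" is reduced to the text after "nodeid:", so
+ # get_node_id("node1") returns 101. Any other outcome (no match, or
+ # unexpected extra output) yields 0.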
+
+ def install(self, target):
+ """ Generate a CIB file and install it to the given cluster node """
+
+ old = self._factory.tmpfile
+
+ # Force a rebuild
+ self._cib = None
+
+ self._factory.tmpfile = "%s/cib.xml" % BuildOptions.CIB_DIR
+ self.contents(target)
+ self._factory.rsh(self._factory.target, "chown %s %s" % (BuildOptions.DAEMON_USER, self._factory.tmpfile))
+
+ self._factory.tmpfile = old
+
+ def contents(self, target):
+ """ Generate a complete CIB file """
+
+ # fencing resource
+ if self._cib:
+ return self._cib
+
+ if target:
+ self._factory.target = target
+
+ self._factory.rsh(self._factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self._factory.tmpfile))
+ self._num_nodes = len(self._cm.Env["nodes"])
+
+ no_quorum = "stop"
+ if self._num_nodes < 3:
+ no_quorum = "ignore"
+ self._factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self._num_nodes)
+
+ # We don't need a nodes section unless we add attributes
+ stn = None
+
+ # Fencing resource
+ # Define first so that the shell doesn't reject every update
+ if self._cm.Env["DoFencing"]:
+
+ # Define the "real" fencing device
+ st = Resource(self._factory, "Fencing", self._cm.Env["stonith-type"], "stonith")
+
+ # Set a threshold for unreliable stonith devices such as the vmware one
+ st.add_meta("migration-threshold", "5")
+ st.add_op("monitor", "120s", timeout="120s")
+ st.add_op("stop", "0", timeout="60s")
+ st.add_op("start", "0", timeout="60s")
+
+ # For remote node tests, a cluster node is stopped and brought back up
+ # as a remote node with the name "remote-OLDNAME". To allow fencing
+ # devices to fence these nodes, create a list of all possible node names.
+ all_node_names = [ prefix+n for n in self._cm.Env["nodes"] for prefix in ('', 'remote-') ]
+
+ # Add all parameters specified by user
+ entries = self._cm.Env["stonith-params"].split(',')
+ for entry in entries:
+ try:
+ (name, value) = entry.split('=', 1)
+ except ValueError:
+ print("Warning: skipping invalid fencing parameter: %s" % entry)
+ continue
+
+ # Allow user to specify "all" as the node list, and expand it here
+ if name in [ "hostlist", "pcmk_host_list" ] and value == "all":
+ value = ' '.join(all_node_names)
+
+ st[name] = value
+
+ st.commit()
+
+ # Test advanced fencing logic
+ stf_nodes = []
+ stt_nodes = []
+ attr_nodes = {}
+
+ # Create the levels
+ stl = FencingTopology(self._factory)
+ for node in self._cm.Env["nodes"]:
+ # Remote node tests will rename the node
+ remote_node = "remote-%s" % node
+
+ # Randomly assign node to a fencing method
+ ftype = self._cm.Env.random_gen.choice(["levels-and", "levels-or ", "broadcast "])
+
+ # For levels-and, randomly choose targeting by node name or attribute
+ by = ""
+
+ if ftype == "levels-and":
+ node_id = self.get_node_id(node)
+
+ if node_id == 0 or self._cm.Env.random_gen.choice([True, False]):
+ by = " (by name)"
+ else:
+ attr_nodes[node] = node_id
+ by = " (by attribute)"
+
+ self._cm.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
+
+ if ftype == "levels-and":
+ # If targeting by name, add a topology level for this node
+ if node not in attr_nodes:
+ stl.level(1, node, "FencingPass,Fencing")
+
+ # Always target remote nodes by name, otherwise we would need to add
+ # an attribute to the remote node only during remote tests (we don't
+ # want nonexistent remote nodes showing up in the non-remote tests).
+ # That complexity is not worth the effort.
+ stl.level(1, remote_node, "FencingPass,Fencing")
+
+ # Add the node (and its remote equivalent) to the list of levels-and nodes.
+ stt_nodes.extend([node, remote_node])
+
+ elif ftype == "levels-or ":
+ for n in [ node, remote_node ]:
+ stl.level(1, n, "FencingFail")
+ stl.level(2, n, "Fencing")
+
+ stf_nodes.extend([node, remote_node])
+
+ # If any levels-and nodes were targeted by attribute,
+ # create the attributes and a level for the attribute.
+ if attr_nodes:
+ stn = Nodes(self._factory)
+
+ for (node_name, node_id) in attr_nodes.items():
+ stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" })
+
+ stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
+
+ # Create a Dummy agent that always passes for levels-and
+ if stt_nodes:
+ stt = Resource(self._factory, "FencingPass", "fence_dummy", "stonith")
+ stt["pcmk_host_list"] = " ".join(stt_nodes)
+ # Wait this many seconds before doing anything, handy for letting disks get flushed too
+ stt["random_sleep_range"] = "30"
+ stt["mode"] = "pass"
+ stt.commit()
+
+ # Create a Dummy agent that always fails for levels-or
+ if stf_nodes:
+ stf = Resource(self._factory, "FencingFail", "fence_dummy", "stonith")
+ stf["pcmk_host_list"] = " ".join(stf_nodes)
+ # Wait this many seconds before doing anything, handy for letting disks get flushed too
+ stf["random_sleep_range"] = "30"
+ stf["mode"] = "fail"
+ stf.commit()
+
+ # Now commit the levels themselves
+ stl.commit()
+
+ o = Option(self._factory)
+ o["stonith-enabled"] = self._cm.Env["DoFencing"]
+ o["start-failure-is-fatal"] = "false"
+ o["pe-input-series-max"] = "5000"
+ o["shutdown-escalation"] = "5min"
+ o["batch-limit"] = "10"
+ o["dc-deadtime"] = "5s"
+ o["no-quorum-policy"] = no_quorum
+
+ o.commit()
+
+ o = OpDefaults(self._factory)
+ o["timeout"] = "90s"
+ o.commit()
+
+ # Commit the nodes section if we defined one
+ if stn is not None:
+ stn.commit()
+
+ # Add an alerts section if possible
+ if self._factory.rsh.exists_on_all(self._cm.Env["notification-agent"], self._cm.Env["nodes"]):
+ alerts = Alerts(self._factory)
+ alerts.add_alert(self._cm.Env["notification-agent"],
+ self._cm.Env["notification-recipient"])
+ alerts.commit()
+
+ # Add resources?
+ if self._cm.Env["CIBResource"]:
+ self.add_resources()
+
+ # generate cib
+ self._cib = self._show()
+
+ if self._factory.tmpfile != "%s/cib.xml" % BuildOptions.CIB_DIR:
+ self._factory.rsh(self._factory.target, "rm -f %s" % self._factory.tmpfile)
+
+ return self._cib
+
+ def add_resources(self):
+ """ Add various resources and their constraints to the CIB """
+
+ # Per-node resources
+ for node in self._cm.Env["nodes"]:
+ name = "rsc_%s" % node
+ r = self.new_ip(name)
+ r.prefer(node, "100")
+ r.commit()
+
+ # Migrator
+ # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
+ m = Resource(self._factory, "migrator","Dummy", "ocf", "pacemaker")
+ m["passwd"] = "whatever"
+ m.add_meta("resource-stickiness","1")
+ m.add_meta("allow-migrate", "1")
+ m.add_op("monitor", "P10S")
+ m.commit()
+
+ # Ping the test exerciser
+ p = Resource(self._factory, "ping-1","ping", "ocf", "pacemaker")
+ p.add_op("monitor", "60s")
+ p["host_list"] = self._cm.Env["cts-exerciser"]
+ p["name"] = "connected"
+ p["debug"] = "true"
+
+ c = Clone(self._factory, "Connectivity", p)
+ c["globally-unique"] = "false"
+ c.commit()
+
+ # promotable clone resource
+ s = Resource(self._factory, "stateful-1", "Stateful", "ocf", "pacemaker")
+ s.add_op("monitor", "15s", timeout="60s")
+ s.add_op("monitor", "16s", timeout="60s", role="Promoted")
+ ms = Clone(self._factory, "promotable-1", s)
+ ms["promotable"] = "true"
+ ms["clone-max"] = self._num_nodes
+ ms["clone-node-max"] = 1
+ ms["promoted-max"] = 1
+ ms["promoted-node-max"] = 1
+
+ # Require connectivity to run the promotable clone
+ r = Rule(self._factory, "connected", "-INFINITY", op="or")
+ r.add_child(Expression(self._factory, "m1-connected-1", "connected", "lt", "1"))
+ r.add_child(Expression(self._factory, "m1-connected-2", "connected", "not_defined", None))
+ ms.prefer("connected", rule=r)
+
+ ms.commit()
+
+ # Group Resource
+ g = Group(self._factory, "group-1")
+ g.add_child(self.new_ip())
+
+ if self._cm.Env["have_systemd"]:
+ sysd = Resource(self._factory, "petulant", "pacemaker-cts-dummyd@10", "service")
+ sysd.add_op("monitor", "P10S")
+ g.add_child(sysd)
+ else:
+ g.add_child(self.new_ip())
+
+ g.add_child(self.new_ip())
+
+ # Make group depend on the promotable clone
+ g.after("promotable-1", first="promote", then="start")
+ g.colocate("promotable-1", "INFINITY", withrole="Promoted")
+
+ g.commit()
+
+ # LSB resource
+ lsb = Resource(self._factory, "lsb-dummy", "LSBDummy", "lsb")
+ lsb.add_op("monitor", "5s")
+
+ # LSB with group
+ lsb.after("group-1")
+ lsb.colocate("group-1")
+
+ lsb.commit()
+
+
+class ConfigFactory:
+ """ Singleton to generate a CIB file for the environment's schema version """
+
+ def __init__(self, cm):
+ """ Create a new ConfigFactory instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ # pylint: disable=invalid-name
+ self._cm = cm
+ self.rsh = self._cm.rsh
+ if not self._cm.Env["ListTests"]:
+ self.target = self._cm.Env["nodes"][0]
+ self.tmpfile = None
+
+ def log(self, args):
+ """ Log a message """
+
+ self._cm.log("cib: %s" % args)
+
+ def debug(self, args):
+ """ Log a debug message """
+
+ self._cm.debug("cib: %s" % args)
+
+ def create_config(self, name="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION):
+ """ Return a CIB object for the given schema version """
+
+ return CIB(self._cm, name, self)
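+
+ # Typical use of this module, as a sketch (assumes a ClusterManager
+ # instance "cm" such as CTS creates elsewhere; names are illustrative):
+ #
+ # factory = ConfigFactory(cm)
+ # cib = factory.create_config() # CIB for the build-time schema version
+ # cib.install(cm.Env["nodes"][0]) # generate and push cib.xml to a node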
diff --git a/python/pacemaker/_cts/cibxml.py b/python/pacemaker/_cts/cibxml.py
index a4aa0209e4..88df7bf7aa 100644
--- a/python/pacemaker/_cts/cibxml.py
+++ b/python/pacemaker/_cts/cibxml.py
@@ -1,723 +1,723 @@
""" CIB XML generator for Pacemaker's Cluster Test Suite (CTS) """
__all__ = [ "Alerts", "Clone", "Expression", "FencingTopology", "Group", "Nodes", "OpDefaults", "Option", "Resource", "Rule" ]
__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
def key_val_string(**kwargs):
""" Given keyword arguments as key=value pairs, construct a single string
containing all those pairs separated by spaces. This is suitable for
use in an XML element as a list of its attributes.
Any pairs that have value=None will be skipped.
Note that a dictionary can be passed to this function instead of kwargs
by using a construction like:
key_val_string(**{"a": 1, "b": 2})
"""
retval = ""
for (k, v) in kwargs.items():
if v is None:
continue
retval += ' %s="%s"' % (k, v)
return retval
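# For example, key_val_string(a="1", b="2") returns ' a="1" b="2"'
# (each pair is emitted with a leading space).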
def element(element_name, **kwargs):
""" Create an XML element string with the given element_name and attributes.
This element does not support having any children, so it will be closed
on the same line. The attributes are processed by key_val_string.
"""
return "<%s %s/>" % (element_name, key_val_string(**kwargs))
def containing_element(element_name, inner, **kwargs):
""" Like element, but surrounds some child text passed by the inner
parameter.
"""
attrs = key_val_string(**kwargs)
return "<%s %s>%s</%s>" % (element_name, attrs, inner, element_name)
class XmlBase:
""" A base class for deriving all kinds of XML sections in the CIB. This
class contains only the most basic operations common to all sections.
It is up to subclasses to provide most behavior.
Note that subclasses of this base class often have different sets of
arguments to their __init__ methods. In general this is not a great
practice, however it is so thoroughly used in these classes that trying
to straighten it out is likely to cause more bugs than just leaving it
alone for now.
"""
def __init__(self, factory, tag, _id, **kwargs):
""" Create a new XmlBase instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
tag -- The XML element's start and end tag
_id -- A unique name for the element
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
self._children = []
self._factory = factory
self._kwargs = kwargs
self._tag = tag
self.name = _id
def __repr__(self):
""" Return a short string description of this XML section """
return "%s-%s" % (self._tag, self.name)
def add_child(self, child):
""" Add an XML section as a child of this one """
self._children.append(child)
def __setitem__(self, key, value):
""" Add a key/value pair to this element, resulting in it becoming an
XML attribute. If value is None, remove the key.
"""
if value:
self._kwargs[key] = value
else:
self._kwargs.pop(key, None)
def show(self):
""" Return a string representation of this XML section, including all
of its children
"""
text = '''<%s''' % self._tag
if self.name:
text += ''' id="%s"''' % self.name
text += key_val_string(**self._kwargs)
if not self._children:
text += '''/>'''
return text
text += '''>'''
for c in self._children:
text += c.show()
text += '''</%s>''' % self._tag
return text
def _run(self, operation, xml, section, options=""):
""" Update the CIB on the cluster to include this XML section, including
all of its children
Arguments:
operation -- Whether this update is a "create" or "modify" operation
xml -- The XML to update the CIB with, typically the result
of calling show
section -- Which section of the CIB this update applies to (see
the --scope argument to cibadmin for allowed values)
options -- Extra options to pass to cibadmin
"""
if self.name:
label = self.name
else:
label = "<%s>" % self._tag
self._factory.debug("Writing out %s" % label)
fixed = "HOME=/root CIB_file=%s" % self._factory.tmpfile
fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml)
(rc, _) = self._factory.rsh(self._factory.target, fixed)
if rc != 0:
raise RuntimeError("Configure call failed: %s" % fixed)
class InstanceAttributes(XmlBase):
""" A class that creates an <instance_attributes> XML section with
key/value pairs
"""
def __init__(self, factory, _id, attrs):
""" Create a new InstanceAttributes instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
attrs -- Key/value pairs to add as nvpair child elements
"""
XmlBase.__init__(self, factory, "instance_attributes", _id)
# Create an <nvpair> for each attribute
for (attr, value) in attrs.items():
self.add_child(XmlBase(factory, "nvpair", "%s-%s" % (_id, attr),
name=attr, value=value))
class Node(XmlBase):
""" A class that creates a <node> XML section for a single node, complete
with node attributes
"""
def __init__(self, factory, node_name, node_id, node_attrs):
""" Create a new Node instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
node_name -- The value of the uname attribute for this node
node_id -- A unique name for the element
node_attrs -- Additional key/value pairs to set as instance
attributes for this node
"""
XmlBase.__init__(self, factory, "node", node_id, uname=node_name)
self.add_child(InstanceAttributes(factory, "%s-1" % node_name, node_attrs))
class Nodes(XmlBase):
""" A class that creates a <nodes> XML section containing multiple Node
instances as children
"""
def __init__(self, factory):
""" Create a new Nodes instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "nodes", None)
def add_node(self, node_name, node_id, node_attrs):
""" Add a child node element
Arguments:
node_name -- The value of the uname attribute for this node
node_id -- A unique name for the element
node_attrs -- Additional key/value pairs to set as instance
attributes for this node
"""
self.add_child(Node(self._factory, node_name, node_id, node_attrs))
def commit(self):
""" Modify the CIB on the cluster to include this XML section """
self._run("modify", self.show(), "configuration", "--allow-create")
class FencingTopology(XmlBase):
""" A class that creates a <fencing-topology> XML section describing how
fencing is configured in the cluster
"""
def __init__(self, factory):
""" Create a new FencingTopology instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "fencing-topology", None)
def level(self, index, target, devices, target_attr=None, target_value=None):
""" Generate a <fencing-level> XML element
index -- The order in which to attempt fencing-levels
(1 through 9). Levels are attempted in ascending
order until one succeeds.
target -- The name of a single node to which this level applies
devices -- A list of devices that must all be tried for this
level
target_attr -- The name of a node attribute that is set for nodes
to which this level applies
target_value -- The value of a node attribute that is set for nodes
to which this level applies
"""
if target:
xml_id = "cts-%s.%d" % (target, index)
self.add_child(XmlBase(self._factory, "fencing-level", xml_id, target=target, index=index, devices=devices))
else:
xml_id = "%s-%s.%d" % (target_attr, target_value, index)
child = XmlBase(self._factory, "fencing-level", xml_id, index=index, devices=devices)
child["target-attribute"]=target_attr
child["target-value"]=target_value
self.add_child(child)
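# For example, level(1, "node1", "FencingPass,Fencing") emits
# <fencing-level id="cts-node1.1" target="node1" index="1"
# devices="FencingPass,Fencing"/>
# while the attribute-based form (target=None) carries target-attribute
# and target-value in place of target.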
def commit(self):
""" Create this XML section in the CIB """
self._run("create", self.show(), "configuration", "--allow-create")
class Option(XmlBase):
""" A class that creates a <cluster_property_set> XML section of key/value
pairs for cluster-wide configuration settings
"""
def __init__(self, factory, _id="cib-bootstrap-options"):
""" Create a new Option instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
"""
XmlBase.__init__(self, factory, "cluster_property_set", _id)
def __setitem__(self, key, value):
""" Add a child nvpair element containing the given key/value pair """
self.add_child(XmlBase(self._factory, "nvpair", "cts-%s" % key, name=key, value=value))
def commit(self):
""" Modify the CIB on the cluster to include this XML section """
self._run("modify", self.show(), "crm_config", "--allow-create")
class OpDefaults(XmlBase):
""" A class that creates a <cts-op_defaults-meta> XML section of key/value
pairs for operation default settings
"""
def __init__(self, factory):
""" Create a new OpDefaults instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "op_defaults", None)
self.meta = XmlBase(self._factory, "meta_attributes", "cts-op_defaults-meta")
self.add_child(self.meta)
def __setitem__(self, key, value):
""" Add a child nvpair meta_attribute element containing the given
key/value pair
"""
self.meta.add_child(XmlBase(self._factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value))
def commit(self):
""" Modify the CIB on the cluster to include this XML section """
self._run("modify", self.show(), "configuration", "--allow-create")
class Alerts(XmlBase):
""" A class that creates an <alerts> XML section """
def __init__(self, factory):
""" Create a new Alerts instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "alerts", None)
self._alert_count = 0
def add_alert(self, path, recipient):
""" Create a new alert as a child of this XML section
Arguments:
path -- The path to a script to be called when a cluster
event occurs
recipient -- An environment variable to be passed to the script
"""
self._alert_count += 1
alert = XmlBase(self._factory, "alert", "alert-%d" % self._alert_count,
path=path)
recipient1 = XmlBase(self._factory, "recipient",
"alert-%d-recipient-1" % self._alert_count,
value=recipient)
alert.add_child(recipient1)
self.add_child(alert)
def commit(self):
""" Modify the CIB on the cluster to include this XML section """
self._run("modify", self.show(), "configuration", "--allow-create")
class Expression(XmlBase):
""" A class that creates an <expression> XML element as part of some
constraint rule
"""
def __init__(self, factory, _id, attr, op, value=None):
""" Create a new Expression instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
attr -- The attribute to be tested
op -- The comparison to perform ("lt", "eq", "defined", etc.)
value -- Value for comparison (can be None for "defined" and
"not_defined" operations)
"""
XmlBase.__init__(self, factory, "expression", _id, attribute=attr, operation=op)
if value:
self["value"] = value
class Rule(XmlBase):
""" A class that creates a <rule> XML section consisting of one or more
expressions, as part of some constraint
"""
def __init__(self, factory, _id, score, op="and", expr=None):
""" Create a new Rule instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
score -- If this rule is used in a location constraint and
evaluates to true, apply this score to the constraint
op -- If this rule contains more than one expression, use this
boolean op when evaluating
expr -- An Expression instance that can be added to this Rule
when it is created
"""
XmlBase.__init__(self, factory, "rule", _id)
self["boolean-op"] = op
self["score"] = score
if expr:
self.add_child(expr)
class Resource(XmlBase):
""" A base class that creates all kinds of <resource> XML sections fully
describing a single cluster resource. This defaults to primitive
resources, but subclasses can create other types.
"""
def __init__(self, factory, _id, rtype, standard, provider=None):
""" Create a new Resource instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
rtype -- The name of the resource agent
standard -- The standard the resource agent follows ("ocf",
"systemd", etc.)
provider -- The vendor providing the resource agent
"""
XmlBase.__init__(self, factory, "native", _id)
self._provider = provider
self._rtype = rtype
self._standard = standard
self._meta = {}
self._op = []
self._param = {}
self._coloc = {}
self._needs = {}
self._scores = {}
if self._standard == "ocf" and not provider:
self._provider = "heartbeat"
elif self._standard == "lsb":
self._provider = None
def __setitem__(self, key, value):
""" Add a child nvpair element containing the given key/value pair as
an instance attribute
"""
self._add_param(key, value)
def add_op(self, _id, interval, **kwargs):
""" Add an operation child XML element to this resource
Arguments:
_id -- A unique name for the element. Also, the action to
perform ("monitor", "start", "stop", etc.)
interval -- How frequently (in seconds) to perform the operation
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
self._op.append(XmlBase(self._factory, "op", "%s-%s" % (_id, interval),
name=_id, interval=interval, **kwargs))
def _add_param(self, name, value):
""" Add a child nvpair element containing the given key/value pair as
an instance attribute
"""
self._param[name] = value
def add_meta(self, name, value):
""" Add a child nvpair element containing the given key/value pair as
a meta attribute
"""
self._meta[name] = value
def prefer(self, node, score="INFINITY", rule=None):
""" Add a location constraint where this resource prefers some node
Arguments:
node -- The name of the node to prefer
score -- Apply this score to the location constraint
rule -- A Rule instance to use in creating this constraint, instead
of creating a new rule
"""
if not rule:
rule = Rule(self._factory, "prefer-%s-r" % node, score,
expr=Expression(self._factory, "prefer-%s-e" % node, "#uname", "eq", node))
self._scores[node] = rule
def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
""" Create an ordering constraint between this resource and some other
Arguments:
resource -- The name of the dependent resource
kind -- How to enforce the constraint ("mandatory", "optional",
"serialize")
first -- The action that this resource must complete before the
then-action can be initiated for the dependent resource
("start", "stop", "promote", "demote")
then -- The action that the dependent resource can execute only
after the first-action has completed (same values as
first)
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
kargs = kwargs.copy()
kargs["kind"] = kind
if then:
kargs["first-action"] = "start"
kargs["then-action"] = then
if first:
kargs["first-action"] = first
self._needs[resource] = kargs
def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
""" Create a colocation constraint between this resource and some other
Arguments:
resource -- The name of the resource that should be located relative
to this one
score -- Apply this score to the colocation constraint
role -- Apply this colocation constraint only to promotable clones
in this role ("started", "promoted", "unpromoted")
withrole -- Apply this colocation constraint only to with-rsc promotable
clones in this role
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
kargs = kwargs.copy()
kargs["score"] = score
if role:
kargs["rsc-role"] = role
if withrole:
kargs["with-rsc-role"] = withrole
self._coloc[resource] = kargs
def _constraints(self):
""" Generate a <constraints> XML section containing all previously added
ordering and colocation constraints
"""
text = "<constraints>"
for (k, v) in self._scores.items():
attrs = {"id": "prefer-%s" % k, "rsc": self.name}
text += containing_element("rsc_location", v.show(), **attrs)
for (k, kargs) in self._needs.items():
attrs = {"id": "%s-after-%s" % (self.name, k), "first": k, "then": self.name}
text += element("rsc_order", **attrs, **kargs)
for (k, kargs) in self._coloc.items():
attrs = {"id": "%s-with-%s" % (self.name, k), "rsc": self.name, "with-rsc": k}
text += element("rsc_colocation", **attrs)
text += "</constraints>"
return text
def show(self):
""" Return a string representation of this XML section, including all
of its children
"""
text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self._standard, self._rtype)
if self._provider:
text += ''' provider="%s"''' % self._provider
text += '''>'''
if self._meta:
nvpairs = ""
for (p, v) in self._meta.items():
attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
nvpairs += element("nvpair", **attrs)
text += containing_element("meta_attributes", nvpairs,
id="%s-meta" % self.name)
if self._param:
nvpairs = ""
for (p, v) in self._param.items():
attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
nvpairs += element("nvpair", **attrs)
text += containing_element("instance_attributes", nvpairs,
id="%s-params" % self.name)
if self._op:
text += '''<operations>'''
for o in self._op:
key = o.name
o.name = "%s-%s" % (self.name, key)
text += o.show()
o.name = key
text += '''</operations>'''
text += '''</primitive>'''
return text
def commit(self):
""" Modify the CIB on the cluster to include this XML section """
self._run("create", self.show(), "resources")
self._run("modify", self._constraints(), "constraints")
class Group(Resource):
""" A specialized Resource subclass that creates a <group> XML section
describing a single group resource consisting of multiple child
primitive resources
"""
def __init__(self, factory, _id):
""" Create a new Group instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
"""
Resource.__init__(self, factory, _id, None, None)
self.tag = "group"
def __setitem__(self, key, value):
self.add_meta(key, value)
def show(self):
""" Return a string representation of this XML section, including all
of its children
"""
text = '''<%s id="%s">''' % (self.tag, self.name)
if len(self._meta) > 0:
nvpairs = ""
for (p, v) in self._meta.items():
attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
nvpairs += element("nvpair", **attrs)
text += containing_element("meta_attributes", nvpairs,
id="%s-meta" % self.name)
for c in self._children:
text += c.show()
text += '''</%s>''' % self.tag
return text
class Clone(Group):
""" A specialized Group subclass that creates a <clone> XML section
describing a clone resource containing multiple instances of a
single primitive resource
"""
def __init__(self, factory, _id, child=None):
""" Create a new Clone instance
Arguments:
- factory -- A CIB.ConfigFactory instance
+ factory -- A ConfigFactory instance
_id -- A unique name for the element
child -- A Resource instance that can be added to this Clone
when it is created. Alternately, use add_child later.
Note that a Clone may only have one child.
"""
Group.__init__(self, factory, _id)
self.tag = "clone"
if child:
self.add_child(child)
def add_child(self, child):
""" Add the given resource as a child of this Clone. Note that a
Clone resource only supports one child at a time.
"""
if not self._children:
self._children.append(child)
else:
self._factory.log("Clones can only have a single child. Ignoring %s" % child.name)
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index 2dfbae4a27..56246abea4 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,645 +1,646 @@
""" Test environment classes for Pacemaker's Cluster Test Suite (CTS) """
__all__ = ["EnvFactory"]
__copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import argparse
import os
import random
import socket
import sys
import time
+from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogKind
class Environment:
""" A class for managing the CTS environment, consisting largely of processing
and storing command line parameters
"""
# pylint doesn't understand that self._rsh is callable (it stores the
# singleton instance of RemoteExec, as returned by the getInstance method
# of RemoteFactory). It's possible we could fix this with type annotations,
# but those were introduced with python 3.5 and we only support python 3.4.
# I think we could also fix this by getting rid of the getInstance methods,
# but that's a project for another day. For now, just disable the warning.
# pylint: disable=not-callable
def __init__(self, args):
""" Create a new Environment instance. This class can be treated kind
of like a dictionary due to the presence of typical dict functions
like __contains__, __getitem__, and __setitem__. However, it is not a
dictionary so do not rely on standard dictionary behavior.
Arguments:
args -- A list of command line parameters, minus the program name.
If None, sys.argv will be used.
"""
self.data = {}
self._nodes = []
# Set some defaults before processing command line arguments. These are
# either not set by any command line parameter, or they need a default
# that can't be set in add_argument.
self["DeadTime"] = 300
self["StartTime"] = 300
self["StableTime"] = 30
self["tests"] = []
self["IPagent"] = "IPaddr2"
self["DoFencing"] = True
self["ClobberCIB"] = False
self["CIBfilename"] = None
self["CIBResource"] = False
self["LogWatcher"] = LogKind.ANY
self["node-limit"] = 0
self["scenario"] = "random"
self.random_gen = random.Random()
self._logger = LogFactory()
self._rsh = RemoteFactory().getInstance()
self._target = "localhost"
self._seed_random()
self._parse_args(args)
if not self["ListTests"]:
self._validate()
self._discover()
def _seed_random(self, seed=None):
""" Initialize the random number generator with the given seed, or use
the current time if None
"""
if not seed:
seed = int(time.time())
self["RandSeed"] = seed
self.random_gen.seed(str(seed))
def dump(self):
""" Print the current environment """
keys = []
for key in list(self.data.keys()):
keys.append(key)
keys.sort()
for key in keys:
s = "Environment[%s]" % key
self._logger.debug("{key:35}: {val}".format(key=s, val=str(self[key])))
def keys(self):
""" Return a list of all environment keys stored in this instance """
return list(self.data.keys())
def __contains__(self, key):
""" Does the given environment key exist? """
if key == "nodes":
return True
return key in self.data
def __getitem__(self, key):
""" Return the given environment key, or None if it does not exist """
if str(key) == "0":
raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
if key == "nodes":
return self._nodes
if key == "Name":
return self._get_stack_short()
return self.data.get(key)
def __setitem__(self, key, value):
""" Set the given environment key to the given value, overriding any
previous value
"""
if key == "Stack":
self._set_stack(value)
elif key == "node-limit":
self.data[key] = value
self._filter_nodes()
elif key == "nodes":
self._nodes = []
for node in value:
# I don't think I need the IP address, etc. but this validates
# the node name against /etc/hosts and/or DNS, so it's a
# GoodThing(tm).
try:
n = node.strip()
socket.gethostbyname_ex(n)
self._nodes.append(n)
except:
self._logger.log("%s not found in DNS... aborting" % node)
raise
self._filter_nodes()
else:
self.data[key] = value
def random_node(self):
""" Choose a random node from the cluster """
return self.random_gen.choice(self["nodes"])
def get(self, key, default=None):
""" Return the value for key if key is in the environment, else default """
if key == "nodes":
return self._nodes
return self.data.get(key, default)
def _set_stack(self, name):
""" Normalize the given cluster stack name """
if name in ["corosync", "cs", "mcp"]:
self.data["Stack"] = "corosync 2+"
else:
raise ValueError("Unknown stack: %s" % name)
def _get_stack_short(self):
""" Return the short name for the currently set cluster stack """
if "Stack" not in self.data:
return "unknown"
if self.data["Stack"] == "corosync 2+":
return "crm-corosync"
LogFactory().log("Unknown stack: %s" % self["stack"])
raise ValueError("Unknown stack: %s" % self["stack"])
def _detect_systemd(self):
""" Detect whether systemd is in use on the target node """
if "have_systemd" not in self.data:
(rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0)
self["have_systemd"] = rc == 0
def _detect_syslog(self):
""" Detect the syslog variant in use on the target node """
if "syslogd" not in self.data:
if self["have_systemd"]:
# Systemd
(_, lines) = self._rsh(self._target, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
self["syslogd"] = lines[0].strip()
else:
# SYS-V
(_, lines) = self._rsh(self._target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
self["syslogd"] = lines[0].strip()
if "syslogd" not in self.data or not self["syslogd"]:
# default
self["syslogd"] = "rsyslog"
def disable_service(self, node, service):
""" Disable the given service on the given node """
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, "systemctl disable %s" % service)
return rc
# SYS-V
(rc, _) = self._rsh(node, "chkconfig %s off" % service)
return rc
def enable_service(self, node, service):
""" Enable the given service on the given node """
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, "systemctl enable %s" % service)
return rc
# SYS-V
(rc, _) = self._rsh(node, "chkconfig %s on" % service)
return rc
def service_is_enabled(self, node, service):
""" Is the given service enabled on the given node? """
if self["have_systemd"]:
# Systemd
# With "systemctl is-enabled", we should check if the service is
# explicitly "enabled" instead of the return code. For example it returns
# 0 if the service is "static" or "indirect", but they don't really count
# as "enabled".
(rc, _) = self._rsh(node, "systemctl is-enabled %s | grep enabled" % service)
return rc == 0
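# For example, "systemctl is-enabled corosync" may print "enabled",
# "disabled", "static", or "indirect"; only output containing "enabled"
# survives the grep, so "static" and "indirect" (which also exit 0)
# are correctly treated as not enabled.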
# SYS-V
(rc, _) = self._rsh(node, "chkconfig --list | grep -e %s.*on" % service)
return rc == 0
def _detect_at_boot(self):
""" Detect if the cluster starts at boot """
if "at-boot" not in self.data:
self["at-boot"] = self.service_is_enabled(self._target, "corosync") \
or self.service_is_enabled(self._target, "pacemaker")
def _detect_ip_offset(self):
""" Detect the offset for IPaddr resources """
if self["CIBResource"] and "IPBase" not in self.data:
(_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
network = lines[0].strip()
(_, lines) = self._rsh(self._target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
try:
self["IPBase"] = lines[0].strip()
except (IndexError, TypeError):
self["IPBase"] = None
if not self["IPBase"]:
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.")
self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
return
# pylint thinks self["IPBase"] is a list, not a string, which causes it
# to error out because a list doesn't have split().
# pylint: disable=no-member
if int(self["IPBase"].split('.')[3]) >= 240:
self._logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
% (self["IPBase"], self["IPBase"].split('.')[3]))
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
def _filter_nodes(self):
""" If --limit-nodes is given, keep that many nodes from the front of the
list of cluster nodes and drop the rest
"""
if self["node-limit"] > 0:
if len(self["nodes"]) > self["node-limit"]:
# pylint thinks self["node-limit"] is a list even though we initialize
# it as an int in __init__ and treat it as an int everywhere.
# pylint: disable=bad-string-format-type
self._logger.log("Limiting the number of nodes configured=%d (max=%d)"
%(len(self["nodes"]), self["node-limit"]))
while len(self["nodes"]) > self["node-limit"]:
self["nodes"].pop(len(self["nodes"])-1)
def _validate(self):
""" Were we given all the required command line parameters? """
if not self["nodes"]:
raise ValueError("No nodes specified!")
def _discover(self):
""" Probe cluster nodes to figure out how to log and manage services """
self._target = random.Random().choice(self["nodes"])
exerciser = socket.gethostname()
# Use the IP where possible to avoid name lookup failures
for ip in socket.gethostbyname_ex(exerciser)[2]:
if ip != "127.0.0.1":
exerciser = ip
break
self["cts-exerciser"] = exerciser
self._detect_systemd()
self._detect_syslog()
self._detect_at_boot()
self._detect_ip_offset()
def _parse_args(self, argv):
""" Parse and validate command line parameters, setting the appropriate
values in the environment dictionary. If argv is None, use sys.argv
instead.
"""
if not argv:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(epilog="%s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500" % sys.argv[0])
grp1 = parser.add_argument_group("Common options")
grp1.add_argument("-g", "--dsh-group", "--group",
metavar="GROUP", dest="group",
help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
grp1.add_argument("-l", "--limit-nodes",
type=int, default=0,
metavar="MAX",
help="Only use the first MAX cluster nodes supplied with --nodes")
grp1.add_argument("--benchmark",
action="store_true",
help="Add timing information")
grp1.add_argument("--list", "--list-tests",
action="store_true", dest="list_tests",
help="List the valid tests")
grp1.add_argument("--nodes",
metavar="NODES",
help="List of cluster nodes separated by whitespace")
grp1.add_argument("--stack",
default="corosync",
metavar="STACK",
help="Which cluster stack is installed")
grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly")
grp2.add_argument("-L", "--logfile",
metavar="PATH",
help="Where to look for logs from cluster nodes")
grp2.add_argument("--at-boot", "--cluster-starts-at-boot",
choices=["1", "0", "yes", "no"],
help="Does the cluster software start at boot time?")
grp2.add_argument("--facility", "--syslog-facility",
default="daemon",
metavar="NAME",
help="Which syslog facility to log to")
grp2.add_argument("--ip", "--test-ip-base",
metavar="IP",
help="Offset for generated IP address resources")
grp3 = parser.add_argument_group("Options for release testing")
grp3.add_argument("-r", "--populate-resources",
action="store_true",
help="Generate a sample configuration")
grp3.add_argument("--choose",
metavar="NAME",
help="Run only the named test")
grp3.add_argument("--fencing", "--stonith",
choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"],
default="1",
help="What fencing agent to use")
grp3.add_argument("--once",
action="store_true",
help="Run all valid tests once")
grp4 = parser.add_argument_group("Additional (less common) options")
grp4.add_argument("-c", "--clobber-cib",
action="store_true",
help="Erase any existing configuration")
grp4.add_argument("-y", "--yes",
action="store_true", dest="always_continue",
help="Continue to run whenever prompted")
grp4.add_argument("--boot",
action="store_true",
help="")
grp4.add_argument("--cib-filename",
metavar="PATH",
help="Install the given CIB file to the cluster")
grp4.add_argument("--experimental-tests",
action="store_true",
help="Include experimental tests")
grp4.add_argument("--loop-minutes",
type=int, default=60,
help="")
grp4.add_argument("--no-loop-tests",
action="store_true",
help="Don't run looping/time-based tests")
grp4.add_argument("--no-unsafe-tests",
action="store_true",
help="Don't run tests that are unsafe for use with ocfs2/drbd")
grp4.add_argument("--notification-agent",
metavar="PATH",
default="/var/lib/pacemaker/notify.sh",
help="Script to configure for Pacemaker alerts")
grp4.add_argument("--notification-recipient",
metavar="R",
default="/var/lib/pacemaker/notify.log",
help="Recipient to pass to alert script")
grp4.add_argument("--oprofile",
metavar="NODES",
help="List of cluster nodes to run oprofile on")
grp4.add_argument("--outputfile",
metavar="PATH",
help="Location to write logs to")
grp4.add_argument("--qarsh",
action="store_true",
help="Use QARSH to access nodes instead of SSH")
grp4.add_argument("--schema",
metavar="SCHEMA",
- default="pacemaker-3.0",
+ default="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION,
help="Create a CIB conforming to the given schema")
grp4.add_argument("--seed",
metavar="SEED",
help="Use the given string as the random number seed")
grp4.add_argument("--set",
action="append",
metavar="ARG",
default=[],
help="Set key=value pairs (can be specified multiple times)")
grp4.add_argument("--stonith-args",
metavar="ARGS",
default="hostlist=all,livedangerously=yes",
help="")
grp4.add_argument("--stonith-type",
metavar="TYPE",
default="external/ssh",
help="")
grp4.add_argument("--trunc",
action="store_true", dest="truncate",
help="Truncate log file before starting")
grp4.add_argument("--valgrind-procs",
metavar="PROCS",
default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd",
help="Run valgrind against the given space-separated list of processes")
grp4.add_argument("--valgrind-tests",
action="store_true",
help="Include tests using valgrind")
grp4.add_argument("--warn-inactive",
action="store_true",
help="Warn if a resource is assigned to an inactive node")
parser.add_argument("iterations",
nargs='?',
type=int, default=1,
help="Number of tests to run")
args = parser.parse_args(args=argv)
# Set values on this object based on what happened with command line
# processing. This has to be done in several blocks.
# These values can always be set. They get a default from the add_argument
# calls, only do one thing, and they do not have any side effects.
self["ClobberCIB"] = args.clobber_cib
self["ListTests"] = args.list_tests
self["Schema"] = args.schema
self["Stack"] = args.stack
self["SyslogFacility"] = args.facility
self["TruncateLog"] = args.truncate
self["at-boot"] = args.at_boot in ["1", "yes"]
self["benchmark"] = args.benchmark
self["continue"] = args.always_continue
self["experimental-tests"] = args.experimental_tests
self["iterations"] = args.iterations
self["loop-minutes"] = args.loop_minutes
self["loop-tests"] = not args.no_loop_tests
self["notification-agent"] = args.notification_agent
self["notification-recipient"] = args.notification_recipient
self["node-limit"] = args.limit_nodes
self["stonith-params"] = args.stonith_args
self["stonith-type"] = args.stonith_type
self["unsafe-tests"] = not args.no_unsafe_tests
self["valgrind-procs"] = args.valgrind_procs
self["valgrind-tests"] = args.valgrind_tests
self["warn-inactive"] = args.warn_inactive
# Nodes and groups are mutually exclusive, so their defaults cannot be
# set in their add_argument calls. Additionally, groups does more than
# just set a value. Here, set nodes first and then if a group is
# specified, override the previous nodes value.
if args.nodes:
self["nodes"] = args.nodes.split(" ")
else:
self["nodes"] = []
if args.group:
self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args.dsh_group)
LogFactory().add_file(self["OutputFile"], "CTS")
dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args.dsh_group)
if os.path.isfile(dsh_file):
self["nodes"] = []
with open(dsh_file, "r", encoding="utf-8") as f:
for line in f:
l = line.strip()
if not l.startswith('#'):
self["nodes"].append(l)
else:
print("Unknown DSH group: %s" % args.dsh_group)
# Everything else either can't have a default set in an add_argument
# call (likely because we don't want to always have a value set for it)
# or it does something fancier than just set a single value. However,
# order does not matter for these as long as the user doesn't provide
# conflicting arguments on the command line. So just do Everything
# alphabetically.
if args.boot:
self["scenario"] = "boot"
if args.cib_filename:
self["CIBfilename"] = args.cib_filename
else:
self["CIBfilename"] = None
if args.choose:
self["scenario"] = "sequence"
self["tests"].append(args.choose)
if args.fencing:
if args.fencing in ["0", "no"]:
self["DoFencing"] = False
else:
self["DoFencing"] = True
if args.fencing in ["rhcs", "virt", "xvm"]:
self["stonith-type"] = "fence_xvm"
elif args.fencing == "scsi":
self["stonith-type"] = "fence_scsi"
elif args.fencing in ["lha", "ssh"]:
self["stonith-params"] = "hostlist=all,livedangerously=yes"
self["stonith-type"] = "external/ssh"
elif args.fencing == "openstack":
self["stonith-type"] = "fence_openstack"
print("Obtaining OpenStack credentials from the current environment")
self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
os.environ['OS_REGION_NAME'],
os.environ['OS_TENANT_NAME'],
os.environ['OS_AUTH_URL'],
os.environ['OS_USERNAME'],
os.environ['OS_PASSWORD']
)
elif args.fencing == "rhevm":
self["stonith-type"] = "fence_rhevm"
print("Obtaining RHEV-M credentials from the current environment")
self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
os.environ['RHEVM_USERNAME'],
os.environ['RHEVM_PASSWORD'],
os.environ['RHEVM_SERVER'],
os.environ['RHEVM_PORT'],
)
if args.ip:
self["CIBResource"] = True
self["ClobberCIB"] = True
self["IPBase"] = args.ip
if args.logfile:
self["LogAuditDisabled"] = True
self["LogFileName"] = args.logfile
self["LogWatcher"] = LogKind.REMOTE_FILE
else:
# We can't set this as the default on the parser.add_argument call
# for this option because then args.logfile will be set, which means
# the above branch will be taken and those other values will also be
# set.
self["LogFileName"] = "/var/log/messages"
if args.once:
self["scenario"] = "all-once"
if args.oprofile:
self["oprofile"] = args.oprofile.split(" ")
else:
self["oprofile"] = []
if args.outputfile:
self["OutputFile"] = args.outputfile
LogFactory().add_file(self["OutputFile"])
if args.populate_resources:
self["CIBResource"] = True
self["ClobberCIB"] = True
if args.qarsh:
self._rsh.enable_qarsh()
for kv in args.set:
(name, value) = kv.split("=")
self[name] = value
print("Setting %s = %s" % (name, value))
class EnvFactory:
""" A class for constructing a singleton instance of an Environment object """
instance = None
# pylint: disable=invalid-name
def getInstance(self, args=None):
""" Returns the previously created instance of Environment, or creates a
new instance if one does not already exist.
"""
if not EnvFactory.instance:
EnvFactory.instance = Environment(args)
return EnvFactory.instance
diff --git a/python/pacemaker/buildoptions.py.in b/python/pacemaker/buildoptions.py.in
index 53b492b04f..17fe9819d7 100644
--- a/python/pacemaker/buildoptions.py.in
+++ b/python/pacemaker/buildoptions.py.in
@@ -1,57 +1,60 @@
""" A module providing information on build-time configuration of pacemaker """
__all__ = ["BuildOptions"]
__copyright__ = "Copyright 2023 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
class BuildOptions:
""" Variables generated as part of the ./configure && make process. These
affect how pacemaker was configured and where its various parts get
installed.
"""
BASH_PATH = "@BASH_PATH@"
""" Path to the bash shell """
_BUILD_DIR = "@abs_top_builddir@"
""" Top-level build directory
NOTE: This is not especially useful on installed systems, but is useful for
running various programs from a source checkout
"""
CIB_DIR = "@CRM_CONFIG_DIR@"
""" Where CIB files are stored """
+ CIB_SCHEMA_VERSION = "@CIB_VERSION@"
+ """ Latest supported CIB schema version number """
+
COROSYNC_CONFIG_FILE = "@PCMK__COROSYNC_CONF@"
""" Path to the corosync config file """
DAEMON_DIR = "@CRM_DAEMON_DIR@"
""" Where Pacemaker daemons are installed """
DAEMON_USER = "@CRM_DAEMON_USER@"
""" User to run Pacemaker daemons as """
LOCAL_STATE_DIR = "@localstatedir@"
""" Where miscellaneous temporary state files are stored """
LOG_DIR = "@CRM_LOG_DIR@"
""" Where Pacemaker log files are stored """
OCF_RA_INSTALL_DIR = "@OCF_RA_INSTALL_DIR@"
""" Where resource agents are installed """
OCF_ROOT_DIR = "@OCF_ROOT_DIR@"
""" Root directory for OCF resource agents and libraries """
RSC_TMP_DIR = "@CRM_RSCTMP_DIR@"
""" Where resource agents should keep state files """
# pylint: disable=comparison-of-constants
REMOTE_ENABLED = "@PC_NAME_GNUTLS@" != ""
""" Was Pacemaker Remote support built? """
SBIN_DIR = "@sbindir@"
""" Where administrative programs are installed """
SCHEMA_DIR = "@CRM_SCHEMA_DIRECTORY@"
""" Where Relax-NG schema files are stored """
