diff --git a/INSTALL.md b/INSTALL.md
index d25d4b04d9..91fa92a01d 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1,79 +1,79 @@
# How to Install Pacemaker

## Build Dependencies

| Version         | Fedora-based       | Suse-based         | Debian-based   |
|:---------------:|:------------------:|:------------------:|:--------------:|
-| 1.11 or later   | automake           | automake           | automake       |
+| 1.13 or later   | automake           | automake           | automake       |
| 2.64 or later   | autoconf           | autoconf           | autoconf       |
|                 | libtool            | libtool            | libtool        |
|                 | libtool-ltdl-devel |                    | libltdl-dev    |
|                 | libuuid-devel      | libuuid-devel      | uuid-dev       |
-|                 | pkgconfig          | pkgconfig          | pkg-config     |
-| 2.32.0 or later | glib2-devel        | glib2-devel        | libglib2.0-dev |
+| 0.27 or later   | pkgconfig          | pkgconfig          | pkg-config     |
+| 2.42.0 or later | glib2-devel        | glib2-devel        | libglib2.0-dev |
|                 | libxml2-devel      | libxml2-devel      | libxml2-dev    |
|                 | libxslt-devel      | libxslt-devel      | libxslt-dev    |
|                 | bzip2-devel        | libbz2-devel       | libbz2-dev     |
-|                 | libqb-devel        | libqb-devel        | libqb-dev      |
-| 3.2 or later    | python3            | python3            | python3        |
+| 0.17.0 or later | libqb-devel        | libqb-devel        | libqb-dev      |
+| 3.4 or later    | python3            | python3            | python3        |

Also: GNU make

### Cluster Stack Dependencies

*Only corosync is currently supported*

| Version         | Fedora-based       | Suse-based         | Debian-based   |
|:---------------:|:------------------:|:------------------:|:--------------:|
| 2.0.0 or later  | corosynclib        | libcorosync        | corosync       |
| 2.0.0 or later  | corosynclib-devel  | libcorosync-devel  |                |
|                 |                    |                    | libcfg-dev     |
|                 |                    |                    | libcpg-dev     |
|                 |                    |                    | libcmap-dev    |
|                 |                    |                    | libquorum-dev  |

### Optional Build Dependencies

| Feature Enabled                                  | Version         | Fedora-based            | Suse-based              | Debian-based            |
|:-----------------------------------------------:|:--------------:|:-----------------------:|:-----------------------:|:-----------------------:|
-| Pacemaker Remote and encrypted remote CIB admin | 2.1.7 or later  | gnutls-devel            | libgnutls-devel         | libgnutls-dev           |
+| Pacemaker Remote and encrypted remote CIB admin | 2.12.0 or later | gnutls-devel            | libgnutls-devel         | libgnutls-dev           |
| encrypted remote CIB admin                      |                 | pam-devel               | pam-devel               | libpam0g-dev            |
| interactive crm_mon                             |                 | ncurses-devel           | ncurses-devel           | ncurses-dev             |
| systemd support                                 |                 | systemd-devel           | systemd-devel           | libsystemd-dev          |
| systemd/upstart resource support                |                 | dbus-devel              | dbus-devel              | libdbus-1-dev           |
| Linux-HA style fencing agents                   |                 | cluster-glue-libs-devel | libglue-devel           | cluster-glue-dev        |
| documentation                                   |                 | asciidoc or asciidoctor | asciidoc or asciidoctor | asciidoc or asciidoctor |
| documentation                                   |                 | help2man                | help2man                | help2man                |
| documentation                                   |                 | inkscape                | inkscape                | inkscape                |
| documentation                                   |                 | docbook-style-xsl       | docbook-xsl-stylesheets | docbook-xsl             |
| documentation                                   |                 | python3-sphinx          | python3-sphinx          | python3-sphinx          |
| documentation (PDF)                             |                 | latexmk texlive texlive-capt-of texlive-collection-xetex texlive-fncychap texlive-framed texlive-multirow texlive-needspace texlive-tabulary texlive-titlesec texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xetex | texlive texlive-latex | texlive texlive-latex-extra |
| RPM packages via "make rpm"                     | 4.11 or later   | rpm                     | rpm                     | (n/a)                   |

## Optional testing dependencies
* procps and psmisc (if running cts-exec, cts-fencing, or CTS)
* valgrind (if running CTS valgrind tests)
* python3-systemd (if using CTS on cluster nodes running systemd)
* rsync (if running CTS container tests)
* libvirt-daemon-driver-lxc (if running CTS container tests)
* libvirt-daemon-lxc (if running CTS container tests)
* libvirt-login-shell (if running CTS container tests)
* nmap (if not specifying an IP address base)
* oprofile (if running CTS profiling tests)
* dlm (to log DLM debugging info after CTS tests)

## Simple install

    $ make && sudo make install

If GNU make is not your default make, use "gmake" instead.

## Detailed install

First, browse the build options that are available:

    $ ./autogen.sh
    $ ./configure --help

Re-run ./configure with any options you want, then proceed with the simple
method.
diff --git a/configure.ac b/configure.ac
index 4e774c1bd0..9ebec93cc9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,2035 +1,2027 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl Copyright 2009-2021 the Pacemaker project contributors
dnl
dnl The version control history for this file may have further details.
dnl
dnl This source code is licensed under the GNU General Public License version 2
dnl or later (GPLv2+) WITHOUT ANY WARRANTY.

dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.64)

dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08).
dnl Once we can require that version, we can simplify this, and no longer
dnl need ACLOCAL_AMFLAGS in Makefile.am.
m4_ifdef([AC_CONFIG_MACRO_DIRS],
         [AC_CONFIG_MACRO_DIRS([m4])],
         [AC_CONFIG_MACRO_DIR([m4])])

AC_DEFUN([AC_DATAROOTDIR_CHECKED])

dnl Suggested structure:
dnl   information on the package
dnl   checks for programs
dnl   checks for libraries
dnl   checks for header files
dnl   checks for types
dnl   checks for structures
dnl   checks for compiler characteristics
dnl   checks for library functions
dnl   checks for system services

m4_include([m4/version.m4])
AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker],
        PCMK_URL)

PCMK_FEATURES=""

LT_CONFIG_LTDL_DIR([libltdl])
AC_CONFIG_AUX_DIR([libltdl/config])
AC_CANONICAL_HOST

dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go
dnl
dnl Internal header: include/config.h
dnl   - Contains ALL defines
dnl   - include/config.h.in is generated automatically by autoheader
dnl   - NOT to be included in any header files except crm_internal.h
dnl     (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl   - Contains a subset of defines checked here
dnl   - Manually edit include/crm_config.h.in to have configure include
dnl     new defines
dnl   - Should not include HAVE_* defines
dnl   - Safe to include anywhere
AC_CONFIG_HEADERS([include/config.h include/crm_config.h])

-dnl 1.11: minimum automake version required
+dnl 1.13: minimum automake version required
dnl foreign: don't require GNU-standard top-level files
dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7
-dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+)
dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+)
-AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects])
+AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects])

-dnl Require pkg-config (with a minimum version)
-PKG_PROG_PKG_CONFIG(0.18)
+dnl Require minimum version of pkg-config
+PKG_PROG_PKG_CONFIG(0.27)
AS_IF([test "x${PKG_CONFIG}" != x], [],
-      [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.18 or later)])])
+      [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.27 or later)])])

-dnl PKG_NOARCH_INSTALLDIR is not available prior to pkg-config 0.27 and
-dnl pkgconf 0.8.10 (uncomment next line to mimic that scenario)
-dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])])
-m4_ifndef([PKG_NOARCH_INSTALLDIR], [
-    AC_DEFUN([PKG_NOARCH_INSTALLDIR], [
-        AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig'])
-    ])
-])
+PKG_INSTALLDIR
+PKG_NOARCH_INSTALLDIR

dnl Example 2.4. Silent Custom Rule to Generate a File
dnl %-bar.pc: %.pc
dnl	$(AM_V_GEN)$(LN_S) $(notdir $^) $@

dnl Versioned attributes implementation is not yet production-ready
AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes])

CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE

LDD=ldd

GLIB_TESTS

dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================

dnl A particular compiler can be forced by setting the CC environment variable
AC_PROG_CC

dnl Use at least C99 if possible. This will generate an "obsolete" warning
dnl since autoconf 2.70, but is needed for older versions.
AC_PROG_CC_STDC

dnl C++ is not needed for build, just maintainer utilities
AC_PROG_CXX

dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs:
dnl "The macro gl_EARLY must be called as soon as possible after verifying that
dnl the C compiler is working. ... The core part of the gnulib checks are done
dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL
dnl as a dependency.
gl_EARLY
gl_SET_CRYPTO_CHECK_DEFAULT([no])
gl_INIT

# --enable-new-dtags: Use RUNPATH instead of RPATH.
# It is necessary to have this done before libtool does linker detection.
# See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])]) AC_SUBST([AM_LDFLAGS]) saved_LDFLAGS="$LDFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING([whether $CC supports $@]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } # yes_no_try $user_response $default DISABLED=0 REQUIRED=1 OPTIONAL=2 yes_no_try() { local value AS_IF([test x"$1" = x""], [value="$2"], [value="$1"]) AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"], [0|no|false|disable], [return $DISABLED], [1|yes|true|enable], [return $REQUIRED], [try|check], [return $OPTIONAL] ) AC_MSG_ERROR([Invalid option value "$value"]) } check_systemdsystemunitdir() { AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) test x"$systemdsystemunitdir" != x"" return $? } dnl =============================================== dnl Configure Options dnl =============================================== dnl Actual library checks come later, but pkg-config can be used here to grab dnl external values to use as defaults for configure options dnl --enable-* options: build process AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])] ) yes_no_try "$enable_quiet" "no" enable_quiet=$? AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@try@:>@])], ) yes_no_try "$enable_fatal_warnings" "try" enable_fatal_warnings=$? AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])] ) yes_no_try "$enable_hardening" "try" enable_hardening=$? dnl --enable-* options: features AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])] ) yes_no_try "$enable_systemd" "try" enable_systemd=$? AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart @<:@try@:>@])] ) yes_no_try "$enable_upstart" "try" enable_upstart=$? dnl --enable-* options: compatibility AC_ARG_ENABLE([compat-2.0], [AS_HELP_STRING([--enable-compat-2.0], m4_normalize([ preserve certain output as it was in 2.0; this option will be available only for the lifetime of the 2.1 series @<:@no@:>@]))] ) yes_no_try "$enable_compat_2_0" "no" enable_compat_2_0=$? AS_IF([test $enable_compat_2_0 -ne $DISABLED], [ AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1], [Keep certain output compatible with 2.0 release series]) PCMK_FEATURES="$PCMK_FEATURES compat-2.0" ] ) # Add an option to create symlinks at the pre-2.0.0 daemon name locations, so # that users and tools can continue to invoke those names directly (e.g., for # meta-data). 
This option will be deprecated in a future release. AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names @<:@no@:>@])] ) yes_no_try "$enable_legacy_links" "no" enable_legacy_links=$? AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED]) dnl --with-* options: basic parameters dnl This argument is defined via an M4 macro so default can be a variable AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACEMAKER_VERSION="$withval" ], [ PACEMAKER_VERSION="$PACKAGE_VERSION" ])] ) VERSION_ARG(VERSION_NUMBER) # Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case # the user used --with-version. Unfortunately, this can only affect the # substitution variables and later uses in this file, not the config.h # constants, so we have to be careful to use only PACEMAKER_VERSION in C code. PACKAGE_VERSION=$PACEMAKER_VERSION VERSION=$PACEMAKER_VERSION CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], m4_normalize([ address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))], [ BUG_URL="$withval" ] ) dnl --with-* options: features AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets @<:@no@:>@])] ) yes_no_try "$with_cibsecrets" "no" with_cibsecrets=$? AC_ARG_WITH([gnutls], [AS_HELP_STRING([--with-gnutls], [support Pacemaker Remote and remote-tls-port using GnuTLS @<:@try@:>@])] ) yes_no_try "$with_gnutls" "try" with_gnutls=$? PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) AC_ARG_WITH([concurrent-fencing-default], [AS_HELP_STRING([--with-concurrent-fencing-default], [default value for concurrent-fencing cluster option @<:@false@:>@])], ) AS_CASE([$with_concurrent_fencing_default], [""], [with_concurrent_fencing_default="false"], [false], [], [true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"], [AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])] ) AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT], ["$with_concurrent_fencing_default"], [Default value for concurrent-fencing cluster option]) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer @<:@try@:>@])] ) yes_no_try "$with_corosync" "try" with_corosync=$? AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios resources])] ) yes_no_try "$with_nagios" "try" with_nagios=$? 
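dnl For illustration (hypothetical command lines, not part of the build):
dnl the --enable-*/--with-* feature options above are all funneled through
dnl yes_no_try, so each accepts the same tri-state spellings:
dnl
dnl     ./configure --enable-systemd        # 1/yes/true/enable  -> REQUIRED
dnl     ./configure --enable-systemd=no     # 0/no/false/disable -> DISABLED
dnl     ./configure --enable-systemd=try    # try/check          -> OPTIONAL
dnl
dnl REQUIRED makes configure fail if the feature is unusable, while
dnl OPTIONAL downgrades it to DISABLED when a dependency is missing.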
dnl --with-* options: directory locations AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users dnl have an older version, they can use our --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) dnl Get default from resource-agents if possible. Otherwise, the default uses dnl /usr/lib rather than libdir because it's determined by the OCF project and dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or dnl such, the OCF agents will be expected in their usual location. However, we dnl do give the user the option to override it. PKG_CHECK_VAR([OCF_ROOT_DIR], [resource-agents], [ocfrootdir], [], [OCF_ROOT_DIR="/usr/lib/ocf"]) AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], m4_normalize([ OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@value from resource-agents package if available otherwise /usr/lib/ocf@:>@]))], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"], [OCF root directory for resource agents and libraries]) dnl Get default from fence-agents if available PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"], [PCMK__FENCE_BINDIR="$sbindir"]) AC_ARG_WITH([fence-bindir], [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([ directory for executable fence agents @<:@value from fence-agents package if available otherwise SBINDIR@:>@]))], [ PCMK__FENCE_BINDIR="$withval" ] ) AC_SUBST(PCMK__FENCE_BINDIR) dnl --with-* options: non-production testing AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations, for effective profiling @<:@no@:>@])] ) yes_no_try "$with_profiling" "no" with_profiling=$? AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations, for effective profiling and coverage testing @<:@no@:>@])] ) yes_no_try "$with_coverage" "no" with_coverage=$? 
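dnl For illustration: several directory defaults above are pulled from
dnl pkg-config variables of the owning packages via PKG_CHECK_VAR; a sketch
dnl of the equivalent manual queries (assuming those packages are installed):
dnl
dnl     pkg-config --variable=ocfrootdir resource-agents
dnl     pkg-config --variable=prefix fence-agents
dnl     pkg-config --variable=systemdsystemunitdir systemd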
AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])], [ SANITIZERS="$withval" ], [ SANITIZERS="" ]) dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION", [Version number of this Pacemaker build]) PACKAGE_SERIES=`echo $VERSION | awk -F. '{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_PROG_LN_S AC_PROG_MKDIR_P # Check for fatal warning support AS_IF([test $enable_fatal_warnings -ne $DISABLED && test "$GCC" = "yes" && cc_supports_flag -Werror], [WERROR="-Werror"], [ WERROR="" AS_CASE([$enable_fatal_warnings], [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])], [$OPTIONAL], [ AC_MSG_NOTICE([Compiler does not support fatal warnings]) enable_fatal_warnings=$DISABLED ]) ]) AC_MSG_NOTICE([Sanitizing prefix: ${prefix}]) AS_IF([test "$prefix" = "NONE"], [ prefix=/usr dnl Fix default variables - "prefix" variable if not specified AS_IF([test "$localstatedir" = "\${prefix}/var"], [localstatedir="/var"]) AS_IF([test "$sysconfdir" = "\${prefix}/etc"], [sysconfdir="/etc"]) ]) AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}]) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}]) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING([which init (rc) directory to use]) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT([$INITDIR]) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE([Sanitizing libdir: ${libdir}]) case $libdir in prefix|NONE) AC_MSG_CHECKING([which lib directory to use]) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT([$libdir]); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
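dnl A sketch of the expansion performed below (values assume the default
dnl prefix=/usr): autoconf leaves directory variables nested, e.g.
dnl
dnl     exec_prefix='${prefix}'
dnl     libdir='${exec_prefix}/lib'
dnl     eval libdir="`eval echo ${libdir}`"    # libdir becomes /usr/lib
dnl
dnl The inner "eval echo" resolves one layer of '${...}' nesting and the
dnl outer eval resolves the next, leaving a fully expanded path.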
eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"

dnl Home-grown variables
if test x"${runstatedir}" = x""; then
    if test x"${pcmk_runstatedir}" = x""; then
        runstatedir="${localstatedir}/run"
    else
        runstatedir="${pcmk_runstatedir}"
    fi
fi
eval runstatedir="$(eval echo ${runstatedir})"
AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"],
                   [Location for modifiable per-process data])
AC_SUBST(runstatedir)

eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
    docdir=${datadir}/doc/${PACKAGE}-${VERSION}
fi
AC_SUBST(docdir)

if test x"${CONFIGDIR}" = x""; then
    CONFIGDIR="${sysconfdir}/sysconfig"
fi
AC_SUBST(CONFIGDIR)

if test x"${CRM_LOG_DIR}" = x""; then
    CRM_LOG_DIR="${localstatedir}/log/pacemaker"
fi
AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
AC_SUBST(CRM_LOG_DIR)

if test x"${CRM_BUNDLE_DIR}" = x""; then
    CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles"
fi
AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR",
                   Location for Pacemaker bundle logs)
AC_SUBST(CRM_BUNDLE_DIR)

eval PCMK__FENCE_BINDIR="`eval echo ${PCMK__FENCE_BINDIR}`"
AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR",
                   [Location for executable fence agents])

AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [],
      [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])])
AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
                   [GnuTLS cipher priorities])

if test x"${BUG_URL}" = x""; then
    BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"
fi
AC_SUBST(BUG_URL)

for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
    sharedstatedir localstatedir libdir includedir oldincludedir infodir \
    mandir INITDIR docdir CONFIGDIR
do
    dirname=`eval echo '${'${j}'}'`
    if test ! -d "$dirname"
    then
        AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])
    fi
done

us_auth=
AC_CHECK_HEADER([sys/socket.h], [
    AC_CHECK_DECL([SO_PEERCRED], [
        # Linux
        AC_CHECK_TYPE([struct ucred], [
            us_auth=peercred_ucred;
            AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1],
                      [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)])
        ], [
            # OpenBSD
            AC_CHECK_TYPE([struct sockpeercred], [
                us_auth=localpeercred_sockpeercred;
                AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1],
                          [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)])
            ], [], [[#include <sys/socket.h>]])
        ], [[#define _GNU_SOURCE
             #include <sys/socket.h>]])
    ], [], [[#include <sys/socket.h>]])
])

AS_IF([test -z "${us_auth}"], [
    # FreeBSD
    AC_CHECK_DECL([getpeereid], [
        us_auth=getpeereid;
        AC_DEFINE([US_AUTH_GETPEEREID], [1],
                  [Define if Unix socket auth method is getpeereid(s, &uid, &gid)])
    ], [
        # Solaris/OpenIndiana
        AC_CHECK_DECL([getpeerucred], [
            us_auth=getpeerucred;
            AC_DEFINE([US_AUTH_GETPEERUCRED], [1],
                      [Define if Unix socket auth method is getpeercred(s, &ucred)])
        ], [
            AC_MSG_FAILURE([No way to authenticate a Unix socket peer])
        ], [[#include <ucred.h>]])
    ])
])

dnl OS-based decision-making is poor autotools practice; feature-based
dnl mechanisms are strongly preferred. Keep this section to a bare minimum;
dnl regard as a "necessary evil".
INIT_EXT=""
PROCFS=0
case "$host_os" in
    dnl Solaris and some *BSD versions support procfs but not files we need
    *bsd*)
        INIT_EXT=".sh"
        ;;
    *linux*)
        PROCFS=1
        ;;
    darwin*)
        LIBS="$LIBS -L${prefix}/lib"
        CFLAGS="$CFLAGS -I${prefix}/include"
        ;;
esac
AC_SUBST(INIT_EXT)

AC_DEFINE_UNQUOTED([SUPPORT_PROCFS], [$PROCFS],
                   [Define to 1 if procfs is supported])

case "$host_cpu" in
    ppc64|powerpc64)
        case $CFLAGS in
            *powerpc64*)
                ;;
            *)
                if test "$GCC" = yes; then
                    CFLAGS="$CFLAGS -m64"
                fi
                ;;
        esac
        ;;
esac

# C99 doesn't guarantee uint64_t type and related format specifiers, but
# prerequisites, corosync + libqb, use that widely, so the target platforms
# are already pre-constrained to those "64bit-clean" (doesn't imply native
# bit width) and hence we deliberately refrain from artificial surrogates
# (sans manipulation through cached values).
AC_CACHE_VAL(
    [pcmk_cv_decl_inttypes],
    [
        AC_CHECK_DECLS(
            [PRIu64, PRIu32, PRIx32, SCNu64],
            [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"],
            [
                # test shall only react on "no" cached result & error out respectively
                if test "x$ac_cv_have_decl_PRIu64" = xno; then
                    AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)])
                elif test "x$ac_cv_have_decl_PRIu32" = xno; then
                    AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)])
                elif test "x$ac_cv_have_decl_PRIx32" = xno; then
                    AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)])
                elif test "x$ac_cv_have_decl_SCNu64" = xno; then
                    AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)])
                fi
            ],
            [[#include <inttypes.h>]]
        )
    ]
)
(
    set $pcmk_cv_decl_inttypes
    AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T])
    AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T])
    AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T])
    AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS])
)

dnl ===============================================
dnl Program Paths
dnl ===============================================

PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
export PATH

dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL
dnl was NOT being expanded all the time thus causing things to fail.
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) AS_IF([test "x${LIBTOOL}" != "x"], [], [AC_MSG_FAILURE([Could not find required build tool libtool (or equivalent)])]) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) -dnl Pacemaker requires a minimum Python version of 3.2 -AM_PATH_PYTHON([3.2]) +dnl Require a minimum Python version +AM_PATH_PYTHON([3.4]) AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl Bash is needed for building man pages and running regression tests. dnl BASH is already an environment variable, so use something else. AC_PATH_PROG([BASH_PATH], [bash]) AS_IF([test "x${BASH_PATH}" != "x"], [], [AC_MSG_FAILURE([Could not find required build tool bash])]) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING([for DocBook-to-manpage transform]) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS} do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT([$MANPAGE_XSLT]) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""]) AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then $GETOPT_PATH -T >/dev/null 2>/dev/null if test $? 
-eq 4; then break fi fi GETOPT_PATH="" done IFS=$IFS_orig AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])], [ AC_MSG_RESULT([no]) AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt]) ]) AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) PKG_CHECK_MODULES([UUID], [uuid], [CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}" LIBS="${LIBS} ${UUID_LIBS}"]) AC_CHECK_FUNCS([sched_setscheduler]) if test "$ac_cv_func_sched_setscheduler" != yes; then PC_LIBS_RT="" else PC_LIBS_RT="-lrt" fi AC_SUBST(PC_LIBS_RT) -# Require glib 2.32.0 (2012-03) or later -PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.32.0], +# Require minimum glib version +PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # Check whether high-resolution sleep function is available AC_CHECK_FUNCS([nanosleep usleep]) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) REQUIRE_LIB([xslt], [xsltApplyStylesheet]) dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) 
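dnl For illustration (the exact warning text is a hypothetical example in
dnl the glibc style): a deprecated header may carry something like
dnl
dnl     #warning redirecting incorrect #include <sys/poll.h> to <poll.h>
dnl
dnl which is harmless during plain configure checks but becomes an error
dnl once -Werror is in effect, so the header checks below temporarily adopt
dnl the same $WERROR the build itself will use.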
cc_temp_flags "$CFLAGS $WERROR"

# Optional headers (inclusion of these should be conditional in C code)
AC_CHECK_HEADERS([getopt.h])
AC_CHECK_HEADERS([linux/swab.h])
AC_CHECK_HEADERS([stddef.h])
AC_CHECK_HEADERS([sys/signalfd.h])
AC_CHECK_HEADERS([uuid/uuid.h])
AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h])

# Required headers
REQUIRE_HEADER([arpa/inet.h])
REQUIRE_HEADER([ctype.h])
REQUIRE_HEADER([dirent.h])
REQUIRE_HEADER([errno.h])
REQUIRE_HEADER([glib.h])
REQUIRE_HEADER([grp.h])
REQUIRE_HEADER([limits.h])
REQUIRE_HEADER([netdb.h])
REQUIRE_HEADER([netinet/in.h])
REQUIRE_HEADER([netinet/ip.h], [
    #include <sys/types.h>
    #include <netinet/in.h>
])
REQUIRE_HEADER([pwd.h])
REQUIRE_HEADER([signal.h])
REQUIRE_HEADER([stdio.h])
REQUIRE_HEADER([stdlib.h])
REQUIRE_HEADER([string.h])
REQUIRE_HEADER([strings.h])
REQUIRE_HEADER([sys/ioctl.h])
REQUIRE_HEADER([sys/param.h])
REQUIRE_HEADER([sys/reboot.h])
REQUIRE_HEADER([sys/resource.h])
REQUIRE_HEADER([sys/socket.h])
REQUIRE_HEADER([sys/stat.h])
REQUIRE_HEADER([sys/time.h])
REQUIRE_HEADER([sys/types.h])
REQUIRE_HEADER([sys/utsname.h])
REQUIRE_HEADER([sys/wait.h])
REQUIRE_HEADER([time.h])
REQUIRE_HEADER([unistd.h])
REQUIRE_HEADER([libxml/xpath.h])
REQUIRE_HEADER([libxslt/xslt.h])

cc_restore_flags

AC_CHECK_FUNCS([uuid_unparse], [],
               [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])])

AC_CACHE_CHECK([whether __progname and __progname_full are available],
               [pf_cv_var_progname],
               AC_LINK_IFELSE([
                   AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
                                   [[__progname = "foo"; __progname_full = "foo bar";]],
                                   [pf_cv_var_progname="yes"],
                                   [pf_cv_var_progname="no"])
               ]))
AS_IF([test "$pf_cv_var_progname" = "yes"],
      [AC_DEFINE(HAVE___PROGNAME,1,[ ])])

dnl ========================================================================
dnl Generic declarations
dnl ========================================================================

AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"],
               [], [[
#include <time.h>
]])

dnl ========================================================================
dnl Structures
dnl ========================================================================

AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
AC_CHECK_MEMBER([struct dirent.d_type],
                AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
                [#include <dirent.h>])

dnl ========================================================================
dnl Functions
dnl ========================================================================

REQUIRE_FUNC([getopt])
REQUIRE_FUNC([setenv])
REQUIRE_FUNC([unsetenv])

AC_CACHE_CHECK(whether sscanf supports %m,
               pf_cv_var_sscanf,
               AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <stdio.h>
const char *s = "some-command-line-arg";
int main(int argc, char **argv)
{
    char *name = NULL;
    int n = sscanf(s, "%ms", &name);
    return n == 1 ? 0 : 1;
}
]])],
               pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no"))

AS_IF([test "$pf_cv_var_sscanf" = "yes"], [AC_DEFINE(SSCANF_HAS_M, 1, [ ])])

dnl ========================================================================
dnl bzip2
dnl ========================================================================
REQUIRE_HEADER([bzlib.h])
REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress])

dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================

AC_MSG_CHECKING([for sighandler_t])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]], [[sighandler_t *f;]])],
                  [
                      AC_MSG_RESULT([yes])
                      AC_DEFINE([HAVE_SIGHANDLER_T], [1],
                                [Define to 1 if sighandler_t is available])
                  ],
                  [AC_MSG_RESULT([no])])

dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h])

dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
PC_NAME_CURSES=""
PC_LIBS_CURSES=""
AS_IF([test "$ac_cv_header_ncurses_h" = "yes"], [
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
    PC_NAME_CURSES="ncurses"
])
AS_IF([test "$ac_cv_header_ncurses_ncurses_h" = "yes"], [
    AC_CHECK_LIB(ncurses, printw,
                 [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
    CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
    PC_NAME_CURSES="ncurses"
])

dnl Only look for non-n-library if there was no n-library.
AS_IF([test X"$CURSESLIBS" = X"" && test "$ac_cv_header_curses_h" = "yes"], [
    AC_CHECK_LIB(curses, printw,
                 [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
    PC_LIBS_CURSES="$CURSESLIBS"
])

dnl Only look for non-n-library if there was no n-library.
AS_IF([test X"$CURSESLIBS" = X"" && test "$ac_cv_header_curses_curses_h" = "yes"], [
    AC_CHECK_LIB(curses, printw,
                 [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
    PC_LIBS_CURSES="$CURSESLIBS"
])

if test "x$CURSESLIBS" != "x"; then
    PCMK_FEATURES="$PCMK_FEATURES ncurses"
fi

dnl Check for printw() prototype compatibility
AS_IF([test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual], [
    ac_save_LIBS=$LIBS
    LIBS="$CURSESLIBS"
    cc_temp_flags "-Wcast-qual $WERROR"
    # avoid broken test because of hardened build environment in Fedora 23+
    # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
    # - https://bugzilla.redhat.com/1297985
    AS_IF([cc_supports_flag -fPIC],
          [CFLAGS="$CFLAGS -fPIC"])
    AC_MSG_CHECKING([whether curses library is compatible])
    AC_LINK_IFELSE(
        [AC_LANG_PROGRAM([
#if defined(HAVE_NCURSES_H)
#  include <ncurses.h>
#elif defined(HAVE_NCURSES_NCURSES_H)
#  include <ncurses/ncurses.h>
#elif defined(HAVE_CURSES_H)
#  include <curses.h>
#endif
                         ],
                         [printw((const char *)"Test");]
        )],
        [AC_MSG_RESULT([yes])],
        [
            AC_MSG_RESULT([no])
            AC_MSG_WARN(m4_normalize([Disabling curses because the printw()
                        function of your (n)curses library is old.
If you wish to enable curses, update to a newer version (ncurses 5.4 or later is recommended, available from https://invisible-island.net/ncurses/) ])) AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1], [Define to 1 if curses library has incompatible printw()]) ] ) LIBS=$ac_save_LIBS cc_restore_flags ]) AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== CFLAGS_ORIG="$CFLAGS" AS_IF([test $with_coverage -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ] ) AS_IF([test $with_profiling -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES profile" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin" dnl CFLAGS="$CFLAGS -fno-inline-functions" dnl CFLAGS="$CFLAGS -fno-default-inline" dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once" dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed \ -e 's/-O.\ //g' \ -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \ -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG]) AC_MSG_NOTICE([CFLAGS after: $CFLAGS]) ] ) AC_DEFINE_UNQUOTED([SUPPORT_PROFILING], [$with_profiling], [Support profiling]) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== PKG_CHECK_MODULES(libqb, libqb >= 0.17) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 2.0.2+ (2020-10) AC_CHECK_FUNCS(qb_ipcc_auth_get, AC_DEFINE(HAVE_IPCC_AUTH_GET, 1, [Have qb_ipcc_auth_get function])) dnl libqb 2.0.0+ (2020-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS([stonith/stonith.h], [ AC_CHECK_LIB([pils], [PILLoadPlugin]) AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel]) PCMK_FEATURES="$PCMK_FEATURES lha" ]) AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) if test x"${CRM_DAEMON_USER}" = x""; then CRM_DAEMON_USER="hacluster" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) if test x"${CRM_DAEMON_GROUP}" = x""; then CRM_DAEMON_GROUP="haclient" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker 
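dnl Each substitution variable in this section follows the same pattern --
dnl a shell default, AC_DEFINE_UNQUOTED so C code sees the value, and
dnl AC_SUBST so Makefile.am/.in files see it; a minimal sketch with a
dnl hypothetical FOO_DIR:
dnl
dnl     FOO_DIR="${localstatedir}/lib/pacemaker/foo"
dnl     AC_DEFINE_UNQUOTED(FOO_DIR, "$FOO_DIR", Where foo data is kept)
dnl     AC_SUBST(FOO_DIR)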
AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR",
                   Location to store directory produced by Pacemaker daemons)
AC_SUBST(CRM_PACEMAKER_DIR)

CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
AC_SUBST(CRM_BLACKBOX_DIR)

PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs)
AC_SUBST(PE_STATE_DIR)

CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
AC_SUBST(CRM_CONFIG_DIR)

CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts"
AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data)
AC_SUBST(CRM_CONFIG_CTS)

CRM_DAEMON_DIR="${libexecdir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)

CRM_STATE_DIR="${runstatedir}/crm"
AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
                   [Where to keep state files and sockets])
AC_SUBST(CRM_STATE_DIR)

CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR",
                   Where resource agents should keep state files)
AC_SUBST(CRM_RSCTMP_DIR)

PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR",
                   Where to keep configuration files like authkey)
AC_SUBST(PACEMAKER_CONFIG_DIR)

OCF_RA_DIR="$OCF_ROOT_DIR/resource.d"
AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs)
AC_SUBST(OCF_RA_DIR)

AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries])

AC_PATH_PROGS(GIT, git false)

AC_MSG_CHECKING([build version])
BUILD_VERSION=$Format:%h$
if test $BUILD_VERSION != ":%h$"; then
    AC_MSG_RESULT([$BUILD_VERSION (archive hash)])
elif test -x $GIT && test -d .git; then
    BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
    AC_MSG_RESULT([$BUILD_VERSION (git hash)])
else
    # The current directory name makes a reasonable default
    # Most generated archives will include the hash or tag
    BASE=`basename $PWD`
    BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
    AC_MSG_RESULT([$BUILD_VERSION (directory name)])
fi
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)

HAVE_dbus=1
PKG_CHECK_MODULES([DBUS], [dbus-1],
                  [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
                  [HAVE_dbus=0])
AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
if test $HAVE_dbus = 0; then
    PC_NAME_DBUS=""
else
    PC_NAME_DBUS="dbus-1"
fi
AC_SUBST(PC_NAME_DBUS)

AS_CASE([$enable_systemd],
        [$REQUIRED], [
            AS_IF([test $HAVE_dbus = 0],
                  [AC_MSG_FAILURE([Cannot support systemd resources without DBus])])
            AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
                  [AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])])
            AS_IF([check_systemdsystemunitdir], [],
                  [AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])])
        ],
        [$OPTIONAL], [
            AS_IF([test $HAVE_dbus = 0 \
                   || test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
                  [enable_systemd=$DISABLED], [
                AC_MSG_CHECKING([for systemd version (using dbus-send)])
                ret=$({ dbus-send --system --print-reply \
                            --dest=org.freedesktop.systemd1 \
                            /org/freedesktop/systemd1 \
                            org.freedesktop.DBus.Properties.Get \
                            string:org.freedesktop.systemd1.Manager \
                            string:Version 2>/dev/null \
                        || echo "version unavailable"; } | tail -n1)
                # sanitize output a bit (interested just in value, not type),
                # ret is intentionally unquoted so as to normalize whitespace
                ret=$(echo ${ret} | cut -d' ' -f2-)
                AC_MSG_RESULT([${ret}])
                AS_IF([test "$ret" != "unavailable" \
                       || systemctl --version 2>/dev/null | grep -q systemd], [
                    AS_IF([check_systemdsystemunitdir],
                          [enable_systemd=$REQUIRED],
                          [enable_systemd=$DISABLED])
                ],
                [enable_systemd=$DISABLED]
                )
            ])
        ],
)
AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])],
      [
          AC_MSG_RESULT([yes])
          PCMK_FEATURES="$PCMK_FEATURES systemd"
      ]
)
AC_SUBST([systemdsystemunitdir])
AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd], [Support systemd resources])
AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED])
AC_SUBST(SUPPORT_SYSTEMD)

AS_CASE([$enable_upstart],
        [$REQUIRED], [
            AS_IF([test $HAVE_dbus = 0],
                  [AC_MSG_FAILURE([Cannot support Upstart resources without DBus])])
        ],
        [$OPTIONAL], [
            AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED], [
                AC_MSG_CHECKING([for Upstart version (using dbus-send)])
                ret=$({ dbus-send --system --print-reply \
                            --dest=com.ubuntu.Upstart \
                            /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
                            string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
                        || echo "version unavailable"; } | tail -n1)
                # sanitize output a bit (interested just in value, not type),
                # ret is intentionally unquoted so as to normalize whitespace
                ret=$(echo ${ret} | cut -d' ' -f2-)
                AC_MSG_RESULT([${ret}])
                AS_IF([test "$ret" != "unavailable" \
                       || initctl --version 2>/dev/null | grep -q upstart],
                      [enable_upstart=$REQUIRED],
                      [enable_upstart=$DISABLED]
                )
            ])
        ],
)
AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])],
      [
          AC_MSG_RESULT([yes])
          PCMK_FEATURES="$PCMK_FEATURES upstart"
      ]
)
AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart], [Support Upstart resources])
AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED])
AC_SUBST(SUPPORT_UPSTART)

AS_CASE([$with_nagios],
        [$REQUIRED], [
            AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
                  [AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])])
        ],
        [$OPTIONAL], [
            AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
                  [with_nagios=$DISABLED], [with_nagios=$REQUIRED])
        ]
)
AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"])
AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins])
AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED])

if test x"$NAGIOS_PLUGIN_DIR" = x""; then
    NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
fi
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
AC_SUBST(NAGIOS_PLUGIN_DIR)

if test x"$NAGIOS_METADATA_DIR" = x""; then
    NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"
fi
AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR",
                   Directory for nagios plugins metadata)
AC_SUBST(NAGIOS_METADATA_DIR)

STACKS=""
CLUSTERLIBS=""
PC_NAME_CLUSTER=""

dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================

COROSYNC_LIBS=""

AS_CASE([$with_corosync],
        [$REQUIRED], [
            # These will be fatal if unavailable
            PKG_CHECK_MODULES([cpg], [libcpg])
            PKG_CHECK_MODULES([cfg], [libcfg])
            PKG_CHECK_MODULES([cmap], [libcmap])
            PKG_CHECK_MODULES([quorum], [libquorum])
            PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common])
        ],
[$OPTIONAL], [ PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED]) AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED]) ] ) AS_IF([test $with_corosync -ne $DISABLED], [ AC_MSG_CHECKING([for Corosync 2 or later]) AC_MSG_RESULT([yes]) CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-ge-2" dnl Shutdown tracking added (back) to corosync Jan 2021 saved_LIBS="$LIBS" LIBS="$LIBS $COROSYNC_LIBS" AC_CHECK_FUNCS(corosync_cfg_trackstart, AC_DEFINE(HAVE_COROSYNC_CFG_TRACKSTART, 1, [Have corosync_cfg_trackstart function])) LIBS="$saved_LIBS" ] ) AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync], [Support the Corosync messaging and membership layer]) AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED]) AC_SUBST([SUPPORT_COROSYNC]) dnl dnl Cluster stack - Sanity dnl AS_IF([test "x$STACKS" != "x"], [AC_MSG_NOTICE([Supported stacks:${STACKS}])], [AC_MSG_FAILURE([At least one cluster stack must be supported])]) PCMK_FEATURES="${PCMK_FEATURES}${STACKS}" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== AS_IF([test $with_cibsecrets -ne $DISABLED], [ with_cibsecrets=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED([LRM_CIBSECRETS_DIR], ["$LRM_CIBSECRETS_DIR"], [Location for CIB secrets]) AC_SUBST([LRM_CIBSECRETS_DIR]) ] ) AC_DEFINE_UNQUOTED([SUPPORT_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets]) AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED]) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl Require GnuTLS >=2.12.0 (2011-03) for Pacemaker Remote support PC_NAME_GNUTLS="" AS_CASE([$with_gnutls], [$REQUIRED], [ REQUIRE_LIB([gnutls], [gnutls_sec_param_to_pk_bits]) REQUIRE_HEADER([gnutls/gnutls.h]) ], [$OPTIONAL], [ AC_CHECK_LIB([gnutls], [gnutls_sec_param_to_pk_bits], [], [with_gnutls=$DISABLED]) AC_CHECK_HEADERS([gnutls/gnutls.h], [], [with_gnutls=$DISABLED]) ] ) AS_IF([test $with_gnutls -ne $DISABLED], [ PC_NAME_GNUTLS="gnutls" PCMK_FEATURES="$PCMK_FEATURES remote" ] ) AC_SUBST([PC_NAME_GNUTLS]) AM_CONDITIONAL([BUILD_REMOTE], [test $with_gnutls -ne $DISABLED]) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING([for $SERVICELOG packages]) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" PCMK_FEATURES="$PCMK_FEATURES servicelog" fi 
AC_MSG_RESULT([$SERVICELOG_EXISTS])
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")

dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING([for $SERVICELOG $OPENIPMI packages])
if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG
then
    PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
    REQUIRE_HEADER([malloc.h])
    OPENIPMI_SERVICELOG_EXISTS="yes"
    PCMK_FEATURES="$PCMK_FEATURES ipmiservicelogd"
fi
AC_MSG_RESULT([$OPENIPMI_SERVICELOG_EXISTS])
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG,
               test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")

# --- ASAN/UBSAN/TSAN (see man gcc) ---
# When using SANitizers, we need to pass -fsanitize=... to both CFLAGS and
# LDFLAGS. These flags must come first in the list, or there will be runtime
# issues (for example, the user would have to LD_PRELOAD the ASan runtime
# for it to work properly).
AS_IF([test -n "${SANITIZERS}"], [
    SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g')
    for SANITIZER in $SANITIZERS
    do
        AS_CASE([$SANITIZER],
                [asan|ASAN], [
                    SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address"
                    SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan"
                    REQUIRE_LIB([asan],[main])
                ],
                [ubsan|UBSAN], [
                    SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined"
                    SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan"
                    REQUIRE_LIB([ubsan],[main])
                ],
                [tsan|TSAN], [
                    SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread"
                    SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan"
                    REQUIRE_LIB([tsan],[main])
                ])
    done
])

dnl ========================================================================
dnl Compiler flags
dnl ========================================================================

dnl Make sure that CFLAGS is not exported. If the user did not have CFLAGS
dnl in their environment, then this should have no effect. However, if CFLAGS
dnl was exported from the user's environment, then the new CFLAGS will also
dnl be exported to sub processes.
if export | fgrep " CFLAGS=" > /dev/null; then
    SAVED_CFLAGS="$CFLAGS"
    unset CFLAGS
    CFLAGS="$SAVED_CFLAGS"
    unset SAVED_CFLAGS
fi

AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])

CC_EXTRAS=""

AS_IF([test "$GCC" != yes], [CFLAGS="$CFLAGS -g"], [
    CFLAGS="$CFLAGS -ggdb"

dnl When we don't have diagnostic push / pull, we can't explicitly disable
dnl checking for nonliteral formats in the places where they occur on purpose
dnl thus we disable nonliteral format checking globally as we are aborting
dnl on warnings.
dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags AS_IF([cc_supports_flag "-Wformat-nonliteral"], [gcc_format_nonliteral=yes], [gcc_format_nonliteral=no]) # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). EXTRA_FLAGS="-fgnu89-inline" EXTRA_FLAGS="$EXTRA_FLAGS -Wall" EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return" EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast" EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align" EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement" EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels" EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal" EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations" EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing" EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith" EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings" EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable" EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char" AS_IF([test "x$gcc_diagnostic_push_pull" = "xyes"], [ AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" ], [test "x$gcc_format_nonliteral" = "xyes"], [EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"]) # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"]) done AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}]) ]) dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, 
dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposely targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl AS_IF([test $enable_hardening -eq $OPTIONAL], [ AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0], [enable_hardening=$REQUIRED], [AC_MSG_NOTICE([Hardening: using custom flags from environment])] ) ], [ unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB ] ) AS_CASE([$enable_hardening], [$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])], [$REQUIRED], [ CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" relro=1 ]) # daemons: PIE for both CFLAGS and LDFLAGS AS_IF([cc_supports_flag -fPIE], [ flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [ CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE" LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" pie=1 ]) ] ) # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries AS_IF([test "${relro}" = 1 && test "${pie}" = 1], [ flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" bindnow=1 ]) ] ) AS_IF([test "${bindnow}" = 1], [ flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" ]) ]) # universal: prefer strong > all > default stack protector if possible flag= AS_IF([cc_supports_flag -fstack-protector-strong], [flag="-fstack-protector-strong"], [cc_supports_flag -fstack-protector-all], [flag="-fstack-protector-all"], [cc_supports_flag -fstack-protector], [flag="-fstack-protector"] ) AS_IF([test -n "${flag}"], [ CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 ] ) AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test "${stackprot}" = 1], [AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}])], [AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])] ) ], ) CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us trouble if we set it before here. 
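dnl
dnl As an illustration only (the flag values below are hypothetical, not
dnl recommendations), the targeted hardening variables documented above
dnl would typically be exported into the environment of ./configure, e.g.:
dnl
dnl   CFLAGS_HARDENED_EXE="-fPIE" \
dnl   LDFLAGS_HARDENED_EXE="-pie -Wl,-z,relro -Wl,-z,now" \
dnl   ./configure --enable-hardening=try
dnl
dnl With any of the four variables set and non-null, the automatic
dnl detection above is skipped and the given values are used as they are.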
dnl dnl AS_IF([test $enable_fatal_warnings -ne $DISABLED], [ AC_MSG_NOTICE([Enabling fatal compiler warnings]) CFLAGS="$CFLAGS $WERROR" ]) AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output AS_IF([test $enable_quiet -ne $DISABLED], [ AC_MSG_NOTICE([Suppressing make details]) QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant ], [ QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" ] ) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable CONFIG_FILES_EXEC([cts/CTSlab.py], [cts/LSBDummy], [cts/OCFIPraTest.py], [cts/cluster_test], [cts/cts], [cts/cts-cli], [cts/cts-coverage], [cts/cts-exec], [cts/cts-fencing], [cts/cts-log-watcher], [cts/cts-regression], [cts/cts-scheduler], [cts/cts-support], [cts/lxc_autogen.sh], [cts/benchmark/clubench], [cts/fence_dummy], [cts/pacemaker-cts-dummyd], [daemons/fenced/fence_legacy], [doc/abi-check], [extra/resources/ClusterMon], [extra/resources/HealthSMART], [extra/resources/SysInfo], [extra/resources/ifspeed], [extra/resources/o2cb], [tools/crm_failcount], [tools/crm_master], [tools/crm_report], [tools/crm_standby], [tools/cibsecret], [tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/sphinx/Makefile \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/operations/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/common/tests/xpath/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/rules/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tests/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ 
tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() # Strip leading space from features list PCMK_FEATURES=`echo -e "$PCMK_FEATURES" | sed -e 's/^ //'` dnl ***************** dnl Configure summary dnl ***************** AC_MSG_NOTICE([]) AC_MSG_NOTICE([$PACKAGE configuration:]) AC_MSG_NOTICE([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_NOTICE([ Features = ${PCMK_FEATURES}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ Prefix = ${prefix}]) AC_MSG_NOTICE([ Executables = ${sbindir}]) AC_MSG_NOTICE([ Man pages = ${mandir}]) AC_MSG_NOTICE([ Libraries = ${libdir}]) AC_MSG_NOTICE([ Header files = ${includedir}]) AC_MSG_NOTICE([ Arch-independent files = ${datadir}]) AC_MSG_NOTICE([ State information = ${localstatedir}]) AC_MSG_NOTICE([ System configuration = ${sysconfdir}]) AC_MSG_NOTICE([ OCF agents = ${OCF_ROOT_DIR}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_NOTICE([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ CFLAGS = ${CFLAGS}]) AC_MSG_NOTICE([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_NOTICE([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_NOTICE([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_NOTICE([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_NOTICE([ Libraries = ${LIBS}]) AC_MSG_NOTICE([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_NOTICE([ Unix socket auth method = ${us_auth}]) diff --git a/cts/CTStests.py b/cts/CTStests.py index 3f6c7b123b..af73ffd5fb 100644 --- a/cts/CTStests.py +++ b/cts/CTStests.py @@ -1,3124 +1,3122 @@ """ Test-specific classes for Pacemaker's Cluster Test Suite (CTS) """ __copyright__ = "Copyright 2000-2021 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. # import os import re import time import subprocess import tempfile from stat import * from cts import CTS from cts.CTSaudits import * from cts.CTSvars import * from cts.patterns import PatternSelector from cts.logging import LogFactory from cts.remote import RemoteFactory, input_wrapper from cts.watcher import LogWatcher from cts.environment import EnvFactory AllTestClasses = [ ] class CTSTest(object): ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. 
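A minimal (hypothetical) subclass only needs to set a name and implement
    __call__(), counting the call and reporting the outcome through the
    helpers defined below, for example:

        class NoopTest(CTSTest):
            # illustration only; not a registered CTS test
            def __init__(self, cm):
                CTSTest.__init__(self, cm)
                self.name = "Noop"

            def __call__(self, node):
                self.incr("calls")
                return self.success()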
''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Env = EnvFactory().getInstance() self.rsh = RemoteFactory().getInstance() self.logger = LogFactory() self.templates = PatternSelector(cm["Name"]) self.Audits = [] self.timeout = 120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_docker_unsafe = 0 self.is_experimental = 0 self.is_container = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def log(self, args): self.logger.log(args) def debug(self, args): self.logger.debug(args) def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") if key in self.Stats: return self.Stats[key] return None def log_mark(self, msg): self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" % (self.name,key) self.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.logger.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"]) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match = watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.logger.log(prefix + " " + match) errcount = errcount + 1 else: break else: self.logger.log("Too many errors!") watch.end() return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.Env["experimental-tests"]: return 0 elif self.is_docker_unsafe and self.Env["docker"]: return 0 elif self.is_container and not self.Env["container-tests"]: return 0 elif self.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] class StopTest(CTSTest): '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stop" def __call__(self, node): '''Perform the 'stop' test. 
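In outline (matching the code below): build a log-watcher pattern list
        containing our own "Pat:We_stopped" message plus, for every other
        node believed up, the "Pat:They_stopped" pattern showing that the
        peer noticed this node leaving; then stop the cluster manager on
        the target node and fail if the patterns cannot all be matched
        within DeadTime.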
''' self.incr("calls") if self.CM.ShouldBeStatus[node] != "up": return self.skipped() patterns = [] # Technically we should always be able to notice ourselves stopping patterns.append(self.templates["Pat:We_stopped"] % node) # Any active node needs to notice this one left # (note that this won't work if we have multiple partitions) for other in self.Env["nodes"]: if self.CM.ShouldBeStatus[other] == "up" and other != node: patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) #self.debug("Checking %s will notice %s left"%(other, node)) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() if node == self.CM.OurNode: self.incr("us") else: if self.CM.upcount() <= 1: self.incr("all") else: self.incr("them") self.CM.StopaCM(node) watch_result = watch.lookforall() failreason = None UnmatchedList = "||" if watch.unmatched: (rc, output) = self.rsh(node, "/bin/ps axf", None) for line in output: self.debug(line) (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", None) for line in output: self.debug(line) for regex in watch.unmatched: self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex)) UnmatchedList += regex + "||"; failreason = "Missing shutdown pattern" self.CM.cluster_stable(self.Env["DeadTime"]) if not watch.unmatched or self.CM.upcount() == 0: return self.success() if len(watch.unmatched) >= self.CM.upcount(): return self.failure("no match against (%s)" % UnmatchedList) if failreason == None: return self.success() else: return self.failure(failreason) # # We don't register StopTest because it's better when called by # another test... # class StartTest(CTSTest): '''Start (activate) the cluster manager on a node''' def __init__(self, cm, debug=None): CTSTest.__init__(self,cm) self.name = "start" self.debug = debug def __call__(self, node): '''Perform the 'start' test. ''' self.incr("calls") if self.CM.upcount() == 0: self.incr("us") else: self.incr("them") if self.CM.ShouldBeStatus[node] != "down": return self.skipped() elif self.CM.StartaCM(node): return self.success() else: return self.failure("Startup %s on node %s failed" % (self.Env["Name"], node)) # # We don't register StartTest because it's better when called by # another test... # class FlipTest(CTSTest): '''If it's running, stop it. If it's stopped start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Flip" self.start = StartTest(cm) self.stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") if self.CM.ShouldBeStatus[node] == "up": self.incr("stopped") ret = self.stop(node) type = "up->down" # Give the cluster time to recognize it's gone... time.sleep(self.Env["StableTime"]) elif self.CM.ShouldBeStatus[node] == "down": self.incr("started") ret = self.start(node) type = "down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) class RestartTest(CTSTest): '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Restart" self.start = StartTest(cm) self.stop = StopTest(cm) self.benchmark = 1 def __call__(self, node): '''Perform the 'restart' test. 
''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) class StonithdTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") is_dc = self.CM.is_node_dc(node) watchpats = [] watchpats.append(self.templates["Pat:FenceOpOK"] % node) watchpats.append(self.templates["Pat:NodeFenced"] % node) if self.Env["at-boot"] == 0: self.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node] = "down" else: self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) watchpats.append("%s.* S_STARTING -> S_PENDING" % node) watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() origin = self.Env.RandomGen.choice(self.Env["nodes"]) rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) if rc == 194: # 194 - 256 = -62 = Timer expired # # Look for the patterns, usually this means the required # device was running on the node to be fenced - or that # the required devices were in the process of being loaded # and/or moved # # Effectively the node committed suicide so there will be # no confirmation, but pacemaker should be watching and # fence the node again self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) elif origin != node and rc != 0: self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) elif origin == node and rc != 255: # 255 == broken pipe, ie. 
the node was fenced as expected self.logger.log("Locally originated fencing returned %d" % rc) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ self.templates["Pat:Fencing_start"] % ".*", self.templates["Pat:Fencing_ok"] % ".*", self.templates["Pat:Fencing_active"], r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired", ] def is_applicable(self): if not self.is_applicable_common(): return 0 if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return 1 AllTestClasses.append(StonithdTest) class StartOnebyOne(CTSTest): '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StartOnebyOne" self.stopall = SimulStopLite(cm) self.start = StartTest(cm) self.ns = CTS.NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) class SimulStart(CTSTest): '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStart" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") if not self.startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) class SimulStop(CTSTest): '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStop" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) class StopOnebyOne(CTSTest): '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StopOnebyOne" self.startall = SimulStartLite(cm) self.stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... 
ret = self.startall(None) if not ret: return self.failure("Setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.stop(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to stop: " + repr(failed)) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(StopOnebyOne) class RestartOnebyOne(CTSTest): '''Restart all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RestartOnebyOne" self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'RestartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") did_fail = [] self.set_timer() self.restart = RestartTest(self.CM) for node in self.Env["nodes"]: if not self.restart(node): did_fail.append(node) if did_fail: return self.failure("Could not restart %d nodes: %s" % (len(did_fail), repr(did_fail))) return self.success() # Register RestartOnebyOne as a good test to run AllTestClasses.append(RestartOnebyOne) class PartialStart(CTSTest): '''Start a node - but tell it to stop before it finishes starting up''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "PartialStart" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) self.stop = StopTest(cm) #self.is_unsafe = 1 def __call__(self, node): '''Perform the 'PartialStart' test. ''' self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Setup failed") watchpats = [] watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.CM.StartaCMnoBlock(node) ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) return self.failure("Setup of %s failed" % node) ret = self.stop(node) if not ret: return self.failure("%s did not stop in time" % node) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # We might do some fencing in the 2-node case if we make it up far enough return [ r"Executing reboot fencing operation", r"Requesting fencing \([^)]+\) of node ", ] # Register PartialStart as a good test to run AllTestClasses.append(PartialStart) class StandbyTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Standby" self.benchmark = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) # make sure the node is active # set the node to standby mode # check resources, no resources should be running on the node # set the node to active mode # check resources, resources should have been migrated back (SHOULD THEY?) 
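    # In outline, __call__() below drives that sequence through the cluster
    # manager API used elsewhere in this file, roughly:
    #
    #   self.CM.SetStandbyMode(node, "on")        # put the node in standby
    #   assert self.CM.StandbyStatus(node) == "on"
    #   assert self.CM.active_resources(node) == []
    #   self.CM.SetStandbyMode(node, "off")       # reactivate the node
    #   assert self.CM.StandbyStatus(node) == "off"
    #
    # (sketch only: the real code also watches the log for a state
    # transition pattern and times the "on" and "off" phases)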
def __call__(self, node): self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") self.debug("Make sure node %s is active" % node) if self.CM.StandbyStatus(node) != "off": if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.debug("Getting resources running on node %s" % node) rsc_on_node = self.CM.active_resources(node) watchpats = [] watchpats.append(r"State transition .* -> S_POLICY_ENGINE") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.debug("Setting node %s to standby mode" % node) if not self.CM.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.CM.SetStandbyMode(node, "off") return self.failure("cluster didn't react to standby change on %s" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.debug("Checking resources") bad_run = self.CM.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.debug("Setting node %s to active mode" % node) self.CM.SetStandbyMode(node, "off") return rc self.debug("Setting node %s to active mode" % node) if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) class ValgrindTest(CTSTest): '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind, # and clear any valgrind logs from previous runs. For now, we rely on # the user to do this manually. 
ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") return self.success() def teardown(self, node): # Return all nodes to normal # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks # (no longer used but kept in case feature is restored) leaked = [] self.stop = StopTest(self.CM) for node in self.Env["nodes"]: rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None) for line in output: self.logger.log(line) (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None) for line in output: self.debug(line) self.rsh(node, "rm -f %s" % self.logger.logPat, None) return leaked def __call__(self, node): #leaked = self.find_leaks() #if len(leaked) > 0: # return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", r"pacemaker-based.*: .* avoid confusing Valgrind", r"HA_VALGRIND_ENABLED", ] class StandbyLoopTest(ValgrindTest): '''Check for memory leaks by putting a node in and out of standby for an hour''' # @TODO This is not a useful test for memory leaks def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name = "StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done = time.time() + self.Env["loop-minutes"] * 60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() #AllTestClasses.append(StandbyLoopTest) class BandwidthTest(CTSTest): # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. '''Test the bandwidth which the cluster uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) (handle, self.tempfile) = tempfile.mkstemp(".cts") os.close(handle) self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount() < 1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. 
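        # Measurement sketch (see countbandwidth() below): capture ~100 UDP
        # packets on the cluster port with tcpdump, sum the reported packet
        # lengths, and divide by the time elapsed between the first and last
        # packet; the estimate returned is int((sum * 8) / time) bits/sec.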
fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.logger.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.logger.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" % rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count + 1 linesplit = line.split(" ") for j in range(len(linesplit)-1): if linesplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = T1.split(":") time2split = timesplit[2].split(".") time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = line.split(" ") for j in range(len(linessplit)-1): if linessplit[j] == "udp": break if linessplit[j] == "length:": break try: sum = int(linessplit[j+1]) + sum except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = T2.split(":") time2split = timesplit[2].split(".") time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return int((sum*8)/time) def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class MaintenanceMode(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "MaintenanceMode" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 #self.is_unsafe = 1 self.benchmark = 1 self.action = "asyncmon" self.interval = 0 self.rid = "maintenanceDummy" def toggleMaintenanceMode(self, node, action): pats = [] pats.append(self.templates["Pat:DC_IDLE"]) # fail the resource right after turning Maintenance mode on # verify it is not recovered until maintenance mode is turned off if action == "On": pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid)) else: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.debug("Turning maintenance mode %s" % action) self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) if (action == "On"): self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover%s" % (action)) watch.lookforall() self.log_timer("recover%s" % (action)) if watch.unmatched: 
self.debug("Failed to find patterns when turning maintenance mode %s" % action) return repr(watch.unmatched) return "" def insertMaintenanceDummy(self, node): pats = [] pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid))) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.AddDummyRsc(node, self.rid) self.set_timer("addDummy") watch.lookforall() self.log_timer("addDummy") if watch.unmatched: self.debug("Failed to find patterns when adding maintenance dummy resource") return repr(watch.unmatched) return "" def removeMaintenanceDummy(self, node): pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.RemoveDummyRsc(node, self.rid) self.set_timer("removeDummy") watch.lookforall() self.log_timer("removeDummy") if watch.unmatched: self.debug("Failed to find patterns when removing maintenance dummy resource") return repr(watch.unmatched) return "" def managedRscList(self, node): rscList = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.managed(): rscList.append(tmp.id) return rscList def verifyResources(self, node, rscList, managed): managedList = list(rscList) managed_str = "managed" if not managed: managed_str = "unmanaged" (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if managed and not tmp.managed(): continue elif not managed and tmp.managed(): continue elif managedList.count(tmp.id): managedList.remove(tmp.id) if len(managedList) == 0: self.debug("Found all %s resources on %s" % (managed_str, node)) return True self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) return False def __call__(self, node): '''Perform the 'MaintenanceMode' test. ''' self.incr("calls") verify_managed = False verify_unmanaged = False failPat = "" ret = self.startall(None) if not ret: return self.failure("Setup failed") # get a list of all the managed resources. We use this list # after enabling maintenance mode to verify all managed resources # become un-managed. After maintenance mode is turned off, we use # this list to verify all the resources become managed again. managedResources = self.managedRscList(node) if len(managedResources) == 0: self.logger.log("No managed resources on %s" % node) return self.skipped() # insert a fake resource we can fail during maintenance mode # so we can verify recovery does not take place until after maintenance # mode is disabled. failPat = failPat + self.insertMaintenanceDummy(node) # toggle maintenance mode ON, then fail dummy resource. failPat = failPat + self.toggleMaintenanceMode(node, "On") # verify all the resources are now unmanaged if self.verifyResources(node, managedResources, False): verify_unmanaged = True # Toggle maintenance mode OFF, verify dummy is recovered. failPat = failPat + self.toggleMaintenanceMode(node, "Off") # verify all the resources are now managed again if self.verifyResources(node, managedResources, True): verify_managed = True # Remove our maintenance dummy resource. 
failPat = failPat + self.removeMaintenanceDummy(node) self.CM.cluster_stable() if failPat != "": return self.failure("Unmatched patterns: %s" % (failPat)) elif verify_unmanaged is False: return self.failure("Failed to verify resources became unmanaged during maintenance mode") elif verify_managed is False: return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"schedulerd.*: Recover %s\s*\(.*\)" % self.rid, r"Unknown operation: fail", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(MaintenanceMode) class ResourceRecover(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 self.rid = None self.rid_alt = None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") resourcelist = self.CM.active_resources(node) # if the list of active resources is empty, return directly if len(resourcelist) == 0: self.logger.log("No active resources on %s" % node) return self.skipped() self.rid = self.Env.RandomGen.choice(resourcelist) self.rid_alt = self.rid rsc = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.id == self.rid: rsc = tmp # Handle anonymous clones that get renamed self.rid = rsc.clone_id break if not rsc: return self.failure("Could not find %s in the resource list" % self.rid) self.debug("Shooting %s aka. 
%s" % (rsc.clone_id, rsc.id)) pats = [] pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id)) if rsc.managed(): pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) if rsc.unique(): pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) else: # Anonymous clones may get restarted with a different clone number pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) watch = self.create_watch(pats, 60) watch.setwatch() self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover") watch.lookforall() self.log_timer("recover") self.CM.cluster_stable() recovered = self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.debug("%s is running on: %s" % (self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"schedulerd.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt), r"Unknown operation: fail", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) class ComponentFail(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ComponentFail" # TODO make this work correctly in docker. self.is_docker_unsafe = 1 self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. 
''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.Env["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.Env.RandomGen.choice(self.complist) self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "corosync": self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # @TODO this should be a flag in the Component if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]: # Ignore actions for fence devices if fencer will respawn # (their registration will be lost, and probes will fail) self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id) # supply a copy so self.patterns doesn't end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append(self.templates["Pat:Fencing_ok"] % node) stonith = self.create_watch(stonithPats, 0) stonith.setwatch() # set the watch for stable watch = self.create_watch( tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for any fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") self.CM.cluster_stable(self.Env["StartTime"]) self.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.debug("Found: " + repr(shot)) self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) if self.Env["at-boot"] == 0: self.CM.ShouldBeStatus[node] = "down" # If fencing occurred, chances are many (if not all) the expected logs # will not be sent - or will be lost when the node reboots return self.success() # check for logs indicating a graceful recovery matched = watch.lookforall(allow_multiple_matches=1) if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected %s patterns" % chosen.name) elif not is_stable: return self.failure("Cluster did not become stable after killing %s" % chosen.name) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... 
self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) class SplitBrainTest(CTSTest): '''Test split-brain: when the path between the two nodes breaks, check that the two nodes both take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.is_experimental = 1 def isolate_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.debug("Creating partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: if not self.CM.isolate_node(node, other_nodes): self.logger.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"])) if len(other_nodes) == 0: return 1 self.debug("Healing partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: self.CM.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") self.passed = 1 partitions = {} ret = self.startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we get multiple partitions partitions = {} p_max = len(self.Env["nodes"]) for node in self.Env["nodes"]: p = self.Env.RandomGen.randint(1, p_max) if not p in partitions: partitions[p] = [] partitions[p].append(node) p_max = len(list(partitions.keys())) if p_max > 1: break # else, try again self.debug("Created %d partitions" % p_max) for key in list(partitions.keys()): self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") for key in list(partitions.keys()): self.isolate_partition(partitions[key]) count = 30 while count > 0: if len(self.CM.find_partitions()) != p_max: time.sleep(10) count -= 1 else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability if not self.CM.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state self.CM.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") self.CM.partitions_expected = 1 # And heal them again for key in list(partitions.keys()): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: if len(self.CM.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] partitions = self.CM.find_partitions() if len(partitions) > 0: members = partitions[0].split() if len(members) != len(self.Env["nodes"]): time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is preferable to # trying to continue in a messed-up state if not self.CM.cluster_stable(1200): self.failure("Reformed cluster not stable") if self.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Continue? 
[nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.Env["DoFencing"]: self.rsh(node, "crm_attribute -V -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ r"Another DC detected:", r"(ERROR|error).*: .*Application of an update diff failed", r"pacemaker-controld.*:.*not in our membership list", r"CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) class Reattach(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def _is_managed(self, node): is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1) is_managed = is_managed[:-1] # Strip off the newline return is_managed == "true" def _set_unmanaged(self, node): self.debug("Disable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") def _set_managed(self, node): self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") def setup(self, node): attempt = 0 if not self.startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started while not self.CM.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.debug("Not stable yet, re-testing") else: self.logger.log("Cluster is not stable") return None return 1 def teardown(self, node): # Make sure 'node' is up start = StartTest(self.CM) start(node) if not self._is_managed(node): self.logger.log("Attempting to re-enable resource management on %s" % node) self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): self.logger.log("Could not re-enable resource management") return 0 return 1 def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] # Conveniently, the scheduler will display this message when disabling # management, even if fencing is not enabled, so we can rely on it. 
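        # Test plan (sketch): with resource management disabled, a full
        # cluster stop/start must not produce any resource activity; we
        # watch for start/stop/promote/demote/migrate results and treat
        # any match as a failure, then re-enable management and verify
        # that it took effect.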
managed = self.create_watch(["Delaying fencing operations"], 60) managed.setwatch() self._set_unmanaged(node) if not managed.lookforall(): self.logger.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*")) watch = self.create_watch(pats, 60, "ShutdownActivity") watch.setwatch() self.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self._set_managed(node) return self.failure("Couldn't shut down the cluster") self.debug("Bringing the cluster back up") ret = self.startall(None) time.sleep(5) # allow ping to update the CIB if not ret: self._set_managed(node) return self.failure("Couldn't restart the cluster") if self.local_badnews("ResourceActivity:", watch): self._set_managed(node) return self.failure("Resources stopped or started during cluster restart") watch = self.create_watch(pats, 60, "StartupActivity") watch.setwatch() # Re-enable resource management (and verify it happened). self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): return self.failure("Could not re-enable resource management") # Ignore actions for STONITH resources ignore = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.debug("Ignoring start actions for %s" % r.id) ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id)) if self.local_badnews("ResourceActivity:", watch, ignore): return self.failure("Resources stopped or started after resource management was re-enabled") return ret def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"resource( was|s were) active at shutdown", ] def is_applicable(self): return 1 AllTestClasses.append(Reattach) class SpecialTest1(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SpecialTest1" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. ''' self.incr("calls") # Shut down all the nodes... 
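        # (Overall sketch: stop everything, wipe the CIB on this node,
        # restart it first, then bring the remaining nodes up and let the
        # rejoining peers resupply the configuration.)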
ret = self.stopall(None) if not ret: return self.failure("Could not stop all nodes") # Test config recovery when the other nodes come up self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") # Start the selected node ret = self.restart1(node) if not ret: return self.failure("Could not start "+node) # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Could not start the remaining nodes") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Errors that occur as a result of the CIB being wiped return [ r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", r"error.*: Resource start-up disabled since no STONITH resources have been defined", r"error.*: Either configure some or disable STONITH with the stonith-enabled option", r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity", ] AllTestClasses.append(SpecialTest1) class HAETest(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "HAETest" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_loop = 1 def setup(self, node): # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active = 0 (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.debug("Resource %s is still inactive" % resource) elif rc == 234: self.logger.log("Unknown resource %s" % resource) return 0 elif rc == 246: self.logger.log("Cluster is inactive") return 0 elif rc != 0: self.logger.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "controld" and r.parent != "NA": self.debug("Found dlm: %s" % self.r_dlm) self.r_dlm = r.parent return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None self.r_o2cb = None self.r_ocfs2 = [] if self.find_dlm(node): self.find_ocfs2_resources(node) def is_applicable(self): if not self.is_applicable_common(): return 0 if self.Env["Schema"] == "hae": return 1 return None class HAERoleTest(HAETest): def __init__(self, cm): '''Lars' mount/unmount test for the HA extension. 
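In outline (a sketch of the loop below): for loop-minutes, repeatedly
        flip the DLM clone's target-role meta-attribute between Stopped and
        Started via crm_resource, each time using wait_on_state() to confirm
        the expected number of active clone instances (zero, then one per
        node, for the dlm, o2cb and ocfs2 resources).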
''' HAETest.__init__(self,cm) self.name = "HAERoleTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) class HAEStandbyTest(HAETest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): HAETest.__init__(self,cm) self.name = "HAEStandbyTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) class NearQuorumPointTest(CTSTest): ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down Use a 50% probability for each of up/down. - figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test. 
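
        Each node is independently assigned "start" or "stop" at random, the
        corresponding log patterns are registered, and both sets of actions
        are then issued without blocking, so starts and stops overlap.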
        '''
        self.incr("calls")
        startset = []
        stopset = []

        stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
        # Decide what to do with each node
        for node in self.Env["nodes"]:
            action = self.Env.RandomGen.choice(["start", "stop"])
            #action = self.Env.RandomGen.choice(["start","stop","no change"])
            if action == "start":
                startset.append(node)
            elif action == "stop":
                stopset.append(node)

        self.debug("start nodes:" + repr(startset))
        self.debug("stop nodes:" + repr(stopset))

        # Add search patterns
        watchpats = [ ]
        for node in stopset:
            if self.CM.ShouldBeStatus[node] == "up":
                watchpats.append(self.templates["Pat:We_stopped"] % node)

        for node in startset:
            if self.CM.ShouldBeStatus[node] == "down":
                #watchpats.append(self.templates["Pat:NonDC_started"] % node)
                watchpats.append(self.templates["Pat:Local_started"] % node)
            else:
                for stopping in stopset:
                    if self.CM.ShouldBeStatus[stopping] == "up":
                        watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))

        if len(watchpats) == 0:
            return self.skipped()

        if len(startset) != 0:
            watchpats.append(self.templates["Pat:DC_IDLE"])

        watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
        watch.setwatch()

        # Begin actions
        for node in stopset:
            if self.CM.ShouldBeStatus[node] == "up":
                self.CM.StopaCMnoBlock(node)

        for node in startset:
            if self.CM.ShouldBeStatus[node] == "down":
                self.CM.StartaCMnoBlock(node)

        # Get the result
        if watch.lookforall():
            self.CM.cluster_stable()
            self.CM.fencing_cleanup("NearQuorumPoint", stonith)
            return self.success()

        self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))

        # Get the "bad" nodes
        upnodes = []
        for node in stopset:
            if self.CM.StataCM(node) == 1:
                upnodes.append(node)

        downnodes = []
        for node in startset:
            if self.CM.StataCM(node) == 0:
                downnodes.append(node)

        self.CM.fencing_cleanup("NearQuorumPoint", stonith)
        if upnodes == [] and downnodes == []:
            self.CM.cluster_stable()

            # Make sure they're completely down with no residue
            for node in stopset:
                self.rsh(node, self.templates["StopCmd"])

            return self.success()

        if len(upnodes) > 0:
            self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))

        if len(downnodes) > 0:
            self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))

        return self.failure()

    def is_applicable(self):
        return 1

AllTestClasses.append(NearQuorumPointTest)


class RollingUpgradeTest(CTSTest):
    '''Perform a rolling upgrade of the cluster'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "RollingUpgrade"
        self.start = StartTest(cm)
        self.stop = StopTest(cm)
        self.stopall = SimulStopLite(cm)
        self.startall = SimulStartLite(cm)

    def setup(self, node):
        # Stop the cluster, downgrade all nodes, then restart
        ret = self.stopall(None)
        if not ret:
            return self.failure("Couldn't stop all nodes")

        for node in self.Env["nodes"]:
            if not self.downgrade(node, None):
                return self.failure("Couldn't downgrade %s" % node)

        ret = self.startall(None)
        if not ret:
            return self.failure("Couldn't start all nodes")
        return self.success()

    def teardown(self, node):
        # Stop everything and put the current version back
        ret = self.stopall(None)
        if not ret:
            return self.failure("Couldn't stop all nodes")

        for node in self.Env["nodes"]:
            if not self.upgrade(node, None):
                return self.failure("Couldn't upgrade %s" % node)

        return self.success()

    def install(self, node, version, start=1, flags="--force"):
        target_dir = "/tmp/rpm-%s" % version
        src_dir = "%s/%s" % (self.Env["rpm-dir"], version)

        self.logger.log("Installing %s on %s with %s" % (version, node, flags))
        if not self.stop(node):
            return self.failure("stop failure: "+node)

        rc = self.rsh(node, "mkdir -p %s" % target_dir)
        rc = self.rsh(node, "rm -f %s/*.rpm" % target_dir)
        (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
        for line in lines:
            line = line[:-1]
            rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
        rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))

        if start and not self.start(node):
            return self.failure("start failure: "+node)

        return self.success()

    def upgrade(self, node, start=1):
        return self.install(node, self.Env["current-version"], start)

    def downgrade(self, node, start=1):
        return self.install(node, self.Env["previous-version"], start, "--force --nodeps")

    def __call__(self, node):
        '''Perform the 'Rolling Upgrade' test. '''
        self.incr("calls")

        for node in self.Env["nodes"]:
            if not self.upgrade(node):
                return self.failure("Couldn't upgrade %s" % node)

            self.CM.cluster_stable()

        return self.success()

    def is_applicable(self):
        if not self.is_applicable_common():
            return None

        if "rpm-dir" not in list(self.Env.keys()):
            return None
        if "current-version" not in list(self.Env.keys()):
            return None
        if "previous-version" not in list(self.Env.keys()):
            return None

        return 1

# Register RestartTest as a good test to run
AllTestClasses.append(RollingUpgradeTest)


class BSC_AddResource(CTSTest):
    '''Add a resource to the cluster'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "AddResource"
        self.resource_offset = 0
        self.cib_cmd = """cibadmin -C -o %s -X '%s' """

    def __call__(self, node):
        self.incr("calls")
        self.resource_offset = self.resource_offset + 1

        r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
        start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok"

        patterns = []
        patterns.append(start_pat % r_id)

        watch = self.create_watch(patterns, self.Env["DeadTime"])
        watch.setwatch()

        ip = self.NextIP()
        if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
            return self.failure("Make resource %s failed" % r_id)

        failed = 0
        watch_result = watch.lookforall()
        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log("Warn: Pattern not found: %s" % (regex))
                failed = 1

        if failed:
            return self.failure("Resource pattern(s) not found")

        if not self.CM.cluster_stable(self.Env["DeadTime"]):
            return self.failure("Unstable cluster")

        return self.success()

    def NextIP(self):
        # Increment the last component of the address (hexadecimal for IPv6,
        # decimal for IPv4) and remember the result for the next call
        ip = self.Env["IPBase"]
        if ":" in ip:
            (prefix, sep, suffix) = ip.rpartition(":")
            suffix = "%x" % (int(suffix, 16) + 1)
        else:
            (prefix, sep, suffix) = ip.rpartition(".")
            suffix = str(int(suffix) + 1)

        ip = prefix + sep + suffix
        self.Env["IPBase"] = ip
        return ip.strip()

    def make_ip_resource(self, node, id, rclass, type, ip):
        self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass, type, id, ip, node))
        rsc_xml = """ """ % (id, rclass, type, id, id, ip)

        node_constraint = """ """ % (id, id, id, id, node)

        rc = 0
        (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
        if rc != 0:
            self.logger.log("Constraint creation failed: %d" % rc)
            return None

        (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
        if rc != 0:
            self.logger.log("Resource creation failed: %d" % rc)
            return None

        return 1

    def is_applicable(self):
        if self.Env["DoBSC"]:
            return 1
        return None

AllTestClasses.append(BSC_AddResource)


class SimulStopLite(CTSTest):
    '''Stop any active nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "SimulStopLite"

    def __call__(self, dummy):
        '''Perform the 'SimulStopLite' setup work. '''
        self.incr("calls")
        self.debug("Setup: " + self.name)

        # We ignore the "node" parameter...
        watchpats = [ ]
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "up":
                self.incr("WasStarted")
                watchpats.append(self.templates["Pat:We_stopped"] % node)

        if len(watchpats) == 0:
            return self.success()

        # Stop all the nodes - at about the same time...
        watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
        watch.setwatch()
        self.set_timer()
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "up":
                self.CM.StopaCMnoBlock(node)

        if watch.lookforall():
            # Make sure they're completely down with no residue
            for node in self.Env["nodes"]:
                self.rsh(node, self.templates["StopCmd"])

            return self.success()

        did_fail = 0
        up_nodes = []
        for node in self.Env["nodes"]:
            if self.CM.StataCM(node) == 1:
                did_fail = 1
                up_nodes.append(node)

        if did_fail:
            return self.failure("Active nodes exist: " + repr(up_nodes))

        self.logger.log("Warn: All nodes stopped but CTS didn't detect: " + repr(watch.unmatched))
        return self.failure("Missing log message: " + repr(watch.unmatched))

    def is_applicable(self):
        '''SimulStopLite is a setup test and never applicable'''
        return 0


class SimulStartLite(CTSTest):
    '''Start any stopped nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "SimulStartLite"

    def __call__(self, dummy):
        '''Perform the 'SimulStartLite' setup work. '''
        self.incr("calls")
        self.debug("Setup: " + self.name)

        # We ignore the "node" parameter...
        node_list = []
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "down":
                self.incr("WasStopped")
                node_list.append(node)

        self.set_timer()
        while len(node_list) > 0:
            # Repeat until all nodes come up
            watchpats = [ ]

            uppat = self.templates["Pat:NonDC_started"]
            if self.CM.upcount() == 0:
                uppat = self.templates["Pat:Local_started"]

            watchpats.append(self.templates["Pat:DC_IDLE"])
            for node in node_list:
                watchpats.append(uppat % node)
                watchpats.append(self.templates["Pat:InfraUp"] % node)
                watchpats.append(self.templates["Pat:PacemakerUp"] % node)

            # Start all the nodes - at about the same time...
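            # The idiom here, as throughout these tests: register the log
            # watcher *before* triggering the action so no messages are
            # missed, then act, then collect what matched. A minimal sketch
            # of the idiom (do_the_thing/handle are illustrative names, not
            # part of this file):
            #
            #   watch = self.create_watch(patterns, timeout)
            #   watch.setwatch()               # start tailing logs
            #   do_the_thing()                 # e.g. StartaCMnoBlock(node)
            #   if not watch.lookforall():
            #       handle(watch.unmatched)    # patterns never seen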
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() stonith = self.CM.prepare_fencing_watcher(self.name) for node in node_list: self.CM.StartaCMnoBlock(node) watch.lookforall() node_list = self.CM.fencing_cleanup(self.name, stonith) if node_list == None: return self.failure("Cluster did not stabilize") # Remove node_list messages from watch.unmatched for node in node_list: self.logger.debug("Dealing with stonith operations for %s" % repr(node_list)) if watch.unmatched: try: watch.unmatched.remove(uppat % node) except: self.debug("Already matched: %s" % (uppat % node)) try: watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) except: self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) try: watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) except: self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Startup pattern not found: %s" %(regex)) if not self.CM.cluster_stable(): return self.failure("Cluster did not stabilize") did_fail = 0 unstable = [] for node in self.Env["nodes"]: if self.CM.StataCM(node) == 0: did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstarted nodes exist: " + repr(unstable)) unstable = [] for node in self.Env["nodes"]: if not self.CM.node_stable(node): did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstable cluster nodes exist: " + repr(unstable)) return self.success() def is_applicable(self): '''SimulStartLite is a setup test and never applicable''' return 0 def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.Audits = audits result.append(bound_test) return result class RemoteLXC(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteLXC" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.num_containers = 2 self.is_container = 1 self.is_docker_unsafe = 1 self.failed = 0 self.fail_string = "" def start_lxc_simple(self, node): # restore any artifacts laying around from a previous test. 
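        # Sequence below (the lxc_autogen.sh flags are taken at face value
        # from the calls themselves): reset any leftover guest state, then
        # generate the containers plus their resources, and wait for lxc1 and
        # lxc2 to start and for lxc-ms to start and be promoted.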
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") # generate the containers, put them in the config, add some resources to them pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) self.set_timer("remoteSimpleInit") watch.lookforall() self.log_timer("remoteSimpleInit") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 def cleanup_lxc_simple(self, node): pats = [ ] # if the test failed, attempt to clean up the cib and libvirt environment # as best as possible if self.failed == 1: # restore libvirt and cib self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") return watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") self.set_timer("remoteSimpleCleanup") watch.lookforall() self.log_timer("remoteSimpleCleanup") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 # cleanup libvirt self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") def __call__(self, node): '''Perform the 'RemoteLXC' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") if rc == 1: self.log("Environment test for lxc support failed.") return self.skipped() self.start_lxc_simple(node) self.cleanup_lxc_simple(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed == 1: return self.failure(self.fail_string) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for ping", r"schedulerd.*: Recover (ping|lxc-ms|container)\s*\(.*\)", # The orphaned lxc-ms resource causes an expected transition error # that is a result of the scheduler not having knowledge that the # promotable resource used to be a clone. As a result, it looks like that # resource is running in multiple locations when it shouldn't... But in # this instance we know why this error is occurring and that it is expected. r"Calculated [Tt]ransition .*pe-error", r"Resource lxc-ms .* is active on 2 nodes attempting recovery", r"Unknown operation: fail", r"VirtualDomain.*ERROR: Unable to determine emulator", ] AllTestClasses.append(RemoteLXC) class RemoteDriver(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = self.__class__.__name__ self.is_docker_unsafe = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.stop = StopTest(cm) self.remote_rsc = "remote-rsc" self.cib_cmd = """cibadmin -C -o %s -X '%s' """ self.reset() def reset(self): self.pcmk_started = 0 self.failed = False self.fail_string = "" self.remote_node_added = 0 self.remote_rsc_added = 0 self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False]) def fail(self, msg): """ Mark test as failed. 
""" self.failed = True # Always log the failure. self.logger.log(msg) # Use first failure as test status, as it's likely to be most useful. if not self.fail_string: self.fail_string = msg def get_othernode(self, node): for othernode in self.Env["nodes"]: if othernode == node: # we don't want to try and use the cib that we just shutdown. # find a cluster node that is not our soon to be remote-node. continue else: return othernode def del_rsc(self, node, rsc): othernode = self.get_othernode(node) rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) if rc != 0: self.fail("Removal of resource '%s' failed" % rsc) def add_rsc(self, node, rsc_xml): othernode = self.get_othernode(node) rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) if rc != 0: self.fail("resource creation failed") def add_primitive_rsc(self, node): rsc_xml = """ """ % { "node": self.remote_rsc } self.add_rsc(node, rsc_xml) if not self.failed: self.remote_rsc_added = 1 def add_connection_rsc(self, node): rsc_xml = """ """ % { "node": self.remote_node, "server": node } if self.remote_use_reconnect_interval: # Set reconnect interval on resource rsc_xml = rsc_xml + """ """ % (self.remote_node) rsc_xml = rsc_xml + """ """ % { "node": self.remote_node } self.add_rsc(node, rsc_xml) if not self.failed: self.remote_node_added = 1 def disable_services(self, node): self.corosync_enabled = self.Env.service_is_enabled(node, "corosync") if self.corosync_enabled: self.Env.disable_service(node, "corosync") self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker") if self.pacemaker_enabled: self.Env.disable_service(node, "pacemaker") def restore_services(self, node): if self.corosync_enabled: self.Env.enable_service(node, "corosync") if self.pacemaker_enabled: self.Env.enable_service(node, "pacemaker") def stop_pcmk_remote(self, node): # disable pcmk remote for i in range(10): rc = self.rsh(node, "service pacemaker_remote stop") if rc != 0: time.sleep(6) else: break def start_pcmk_remote(self, node): for i in range(10): rc = self.rsh(node, "service pacemaker_remote start") if rc != 0: time.sleep(6) else: self.pcmk_started = 1 break def freeze_pcmk_remote(self, node): """ Simulate a Pacemaker Remote daemon failure. """ # We freeze the process. self.rsh(node, "killall -STOP pacemaker-remoted") def resume_pcmk_remote(self, node): # We resume the process. self.rsh(node, "killall -CONT pacemaker-remoted") def start_metal(self, node): # Cluster nodes are reused as remote nodes in remote tests. If cluster # services were enabled at boot, in case the remote node got fenced, the # cluster node would join instead of the expected remote one. Meanwhile # pacemaker_remote would not be able to start. Depending on the chances, # the situations might not be able to be orchestrated gracefully any more. # # Temporarily disable any enabled cluster serivces. 
self.disable_services(node) pcmk_started = 0 # make sure the resource doesn't already exist for some reason self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc)) self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node)) if not self.stop(node): self.fail("Failed to shutdown cluster node %s" % node) return self.start_pcmk_remote(node) if self.pcmk_started == 0: self.fail("Failed to start pacemaker_remote on node %s" % node) return # Convert node to baremetal now that it has shutdown the cluster stack pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) self.add_connection_rsc(node) self.set_timer("remoteMetalInit") watch.lookforall() self.log_timer("remoteMetalInit") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) def migrate_connection(self, node): if self.failed: return pats = [ ] pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node)) pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(pats, 120) watch.setwatch() (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None) if rc != 0: self.fail("failed to move remote node connection resource") return self.set_timer("remoteMetalMigrate") watch.lookforall() self.log_timer("remoteMetalMigrate") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return def fail_rsc(self, node): if self.failed: return watchpats = [ ] watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node)) watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, 120) watch.setwatch() self.debug("causing dummy rsc to fail.") rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*") self.set_timer("remoteRscFail") watch.lookforall() self.log_timer("remoteRscFail") if watch.unmatched: self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) def fail_connection(self, node): if self.failed: return watchpats = [ ] watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node) watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node) watch = self.create_watch(watchpats, 120) watch.setwatch() # freeze the pcmk remote daemon. this will result in fencing self.debug("Force stopped active remote node") self.freeze_pcmk_remote(node) self.debug("Waiting for remote node to be fenced.") self.set_timer("remoteMetalFence") watch.lookforall() self.log_timer("remoteMetalFence") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return self.debug("Waiting for the remote node to come back up") self.CM.ns.WaitForNodeToComeUp(node, 120); pats = [ ] watch = self.create_watch(pats, 240) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) # start the remote node again watch it integrate back into cluster. 
self.start_pcmk_remote(node) if self.pcmk_started == 0: self.fail("Failed to start pacemaker_remote on node %s" % node) return self.debug("Waiting for remote node to rejoin cluster after being fenced.") self.set_timer("remoteMetalRestart") watch.lookforall() self.log_timer("remoteMetalRestart") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return def add_dummy_rsc(self, node): if self.failed: return # verify we can put a resource on the remote node pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) # Add a resource that must live on remote-node self.add_primitive_rsc(node) # force that rsc to prefer the remote node. (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None) if rc != 0: self.fail("Failed to place remote resource on remote node.") return self.set_timer("remoteMetalRsc") watch.lookforall() self.log_timer("remoteMetalRsc") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) def test_attributes(self, node): if self.failed: return # This verifies permanent attributes can be set on a remote-node. It also # verifies the remote-node can edit its own cib node section remotely. (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line)) return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to get remote-node attribute") return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to delete remote-node attribute") return def cleanup_metal(self, node): self.restore_services(node) if self.pcmk_started == 0: return pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc)) if self.remote_node_added == 1: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node)) self.set_timer("remoteMetalCleanup") self.resume_pcmk_remote(node) if self.remote_rsc_added == 1: # Remove dummy resource added for remote node tests self.debug("Cleaning up dummy rsc put on remote node") self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc) self.del_rsc(node, self.remote_rsc) if self.remote_node_added == 1: # Remove remote node's connection resource self.debug("Cleaning up remote node connection resource") self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node)) self.del_rsc(node, self.remote_node) watch.lookforall() self.log_timer("remoteMetalCleanup") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) self.stop_pcmk_remote(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.remote_node_added == 1: # Remove remote node itself self.debug("Cleaning up node entry for remote node") self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node) def setup_env(self, node): self.remote_node = "remote-%s" % (node) # we are assuming if all nodes have a key, that it is # the right key... If any node doesn't have a remote # key, we regenerate it everywhere. 
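        # The equivalent manual setup, per the commands used below: create
        # /etc/pacemaker (mode 0750), fill authkey with 4KB read from
        # /dev/urandom, then on every node set group haclient and mode 0640
        # on the key.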
if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]): return # create key locally (handle, keyfile) = tempfile.mkstemp(".cts") os.close(handle) - devnull = open(os.devnull, 'wb') subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"], - stdout=devnull, stderr=devnull) - devnull.close() + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # sync key throughout the cluster for node in self.Env["nodes"]: self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker") self.rsh.cp(keyfile, "root@%s:/etc/pacemaker/authkey" % node) self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey") self.rsh(node, "chmod 0640 /etc/pacemaker/authkey") os.unlink(keyfile) def is_applicable(self): if not self.is_applicable_common(): return False for node in self.Env["nodes"]: rc = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1") if rc != 0: return False return True def start_new_test(self, node): self.incr("calls") self.reset() ret = self.startall(None) if not ret: return self.failure("setup failed: could not start all nodes") self.setup_env(node) self.start_metal(node) self.add_dummy_rsc(node) return True def __call__(self, node): return self.failure("This base class is not meant to be called directly.") def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"""is running on remote.*which isn't allowed""", r"""Connection terminated""", r"""Could not send remote""", ] # RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses class RemoteBasic(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteBaremetal' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.test_attributes(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteBasic) class RemoteStonithd(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteStonithd' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.fail_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def is_applicable(self): if not RemoteDriver.is_applicable(self): return False if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return True def errorstoignore(self): ignore_pats = [ r"Lost connection to Pacemaker Remote node", r"Software caused connection abort", r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor", r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*", r"schedulerd.*:\s+Recover remote-.*\s*\(.*\)", r"error: Result of monitor operation for .* on remote-.*: No executor connection", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats AllTestClasses.append(RemoteStonithd) class RemoteMigrate(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteMigrate' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.migrate_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteMigrate) class RemoteRscFailure(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteRscFailure' test. 
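
        The connection resource is migrated first, and only then is the dummy
        resource on the remote node failed, verifying that the migrated
        connection still manages recovery on that node.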
''' if not self.start_new_test(node): return self.failure(self.fail_string) # This is an important step. We are migrating the connection # before failing the resource. This verifies that the migration # has properly maintained control over the remote-node. self.migrate_connection(node) self.fail_rsc(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def errorstoignore(self): ignore_pats = [ r"schedulerd.*: Recover remote-rsc\s*\(.*\)", r"Dummy.*: No process state file found", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats AllTestClasses.append(RemoteRscFailure) # vim:ts=4:sw=4:et: diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index 4d44a0709d..8e5eb2c9b3 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1529 +1,1528 @@ #!@PYTHON@ """ Regression tests for Pacemaker's scheduler """ __copyright__ = "Copyright 2004-2021 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import stat import shlex import shutil import argparse import subprocess import platform DESC = """Regression tests for Pacemaker's scheduler""" # Each entry in TESTS is a group of tests, where each test consists of a # test base name, test description, and additional test arguments. # Test groups will be separated by newlines in output. TESTS = [ [ [ "simple1", "Offline" ], [ "simple2", "Start" ], [ "simple3", "Start 2" ], [ "simple4", "Start Failed" ], [ "simple6", "Stop Start" ], [ "simple7", "Shutdown" ], #[ "simple8", "Stonith" ], #[ "simple9", "Lower version" ], #[ "simple10", "Higher version" ], [ "simple11", "Priority (ne)" ], [ "simple12", "Priority (eq)" ], [ "simple8", "Stickiness" ], ], [ [ "group1", "Group" ], [ "group2", "Group + Native" ], [ "group3", "Group + Group" ], [ "group4", "Group + Native (nothing)" ], [ "group5", "Group + Native (move)" ], [ "group6", "Group + Group (move)" ], [ "group7", "Group colocation" ], [ "group13", "Group colocation (cant run)" ], [ "group8", "Group anti-colocation" ], [ "group9", "Group recovery" ], [ "group10", "Group partial recovery" ], [ "group11", "Group target_role" ], [ "group14", "Group stop (graph terminated)" ], [ "group15", "Negative group colocation" ], [ "bug-1573", "Partial stop of a group with two children" ], [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ], [ "bug-lf-2613", "Move group on failure" ], [ "bug-lf-2619", "Move group on clone failure" ], [ "group-fail", "Ensure stop order is preserved for partially active groups" ], [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ], [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ], [ "group-dependents", "Account for the location preferences of things colocated with a group" ], [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], ], [ [ "rsc_dep1", "Must not" ], [ "rsc_dep3", "Must" ], [ "rsc_dep5", "Must not 3" ], [ "rsc_dep7", "Must 3" ], [ "rsc_dep10", "Must (but cant)" ], [ "rsc_dep2", "Must (running)" ], [ "rsc_dep8", "Must (running : alt)" ], [ "rsc_dep4", "Must (running + move)" ], [ "asymmetric", "Asymmetric - require explicit location constraints" ], ], [ [ "orphan-0", "Orphan ignore" ], [ "orphan-1", "Orphan stop" ], [ "orphan-2", "Orphan stop, remove failcount" ], ], [ [ "params-0", "Params: No change" 
], [ "params-1", "Params: Changed" ], [ "params-2", "Params: Resource definition" ], [ "params-3", "Params: Restart instead of reload if start pending" ], [ "params-4", "Params: Reload" ], [ "params-5", "Params: Restart based on probe digest" ], [ "novell-251689", "Resource definition change + target_role=stopped" ], [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ], [ "params-6", "Params: Detect reload in previously migrated resource" ], [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ], [ "not-reschedule-unneeded-monitor", "Do not reschedule unneeded monitors while resource definitions have changed" ], [ "reload-becomes-restart", "Cancel reload if restart becomes required" ], ], [ [ "target-0", "Target Role : baseline" ], [ "target-1", "Target Role : master" ], [ "target-2", "Target Role : invalid" ], ], [ [ "base-score", "Set a node's default score for all nodes" ], ], [ [ "date-1", "Dates", [ "-t", "2005-020" ] ], [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ], [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ], [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ], [ "probe-0", "Probe (anon clone)" ], [ "probe-1", "Pending Probe" ], [ "probe-2", "Correctly re-probe cloned groups" ], [ "probe-3", "Probe (pending node)" ], [ "probe-4", "Probe (pending node + stopped resource)" ], [ "standby", "Standby" ], [ "comments", "Comments" ], ], [ [ "one-or-more-0", "Everything starts" ], [ "one-or-more-1", "Nothing starts because of A" ], [ "one-or-more-2", "D can start because of C" ], [ "one-or-more-3", "D cannot start because of B and C" ], [ "one-or-more-4", "D cannot start because of target-role" ], [ "one-or-more-5", "Start A and F even though C and D are stopped" ], [ "one-or-more-6", "Leave A running even though B is stopped" ], [ "one-or-more-7", "Leave A running even though C is stopped" ], [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ], [ "clone-require-all-1", "clone B starts node 3 and 4" ], [ "clone-require-all-2", "clone B remains stopped everywhere" ], [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ], [ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ], [ "clone-require-all-5", "clone B starts on node 1 3 and 4" ], [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ], [ "clone-require-all-7", "clone A and B both start at the same time. 
all instances of A start before B" ], [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ], [ "clone-require-all-no-interleave-2", "C starts on nodes 1, 2, and 4 with only one active instance of B" ], [ "clone-require-all-no-interleave-3", "C remains active when instance of B is stopped on one node and started on another" ], [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ], ], [ [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ], [ "location-date-rules-2", "Use location constraints with effective date-based rules" ], [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ], [ "value-source", "Use location constraints with node attribute expressions using value-source" ], [ "rule-dbl-as-auto-number-match", "Floating-point rule values default to number comparison: match" ], [ "rule-dbl-as-auto-number-no-match", "Floating-point rule values default to number comparison: no " "match" ], [ "rule-dbl-as-integer-match", "Floating-point rule values set to integer comparison: match" ], [ "rule-dbl-as-integer-no-match", "Floating-point rule values set to integer comparison: no match" ], [ "rule-dbl-as-number-match", "Floating-point rule values set to number comparison: match" ], [ "rule-dbl-as-number-no-match", "Floating-point rule values set to number comparison: no match" ], [ "rule-dbl-parse-fail-default-str-match", "Floating-point rule values fail to parse, default to string " "comparison: match" ], [ "rule-dbl-parse-fail-default-str-no-match", "Floating-point rule values fail to parse, default to string " "comparison: no match" ], [ "rule-int-as-auto-integer-match", "Integer rule values default to integer comparison: match" ], [ "rule-int-as-auto-integer-no-match", "Integer rule values default to integer comparison: no match" ], [ "rule-int-as-integer-match", "Integer rule values set to integer comparison: match" ], [ "rule-int-as-integer-no-match", "Integer rule values set to integer comparison: no match" ], [ "rule-int-as-number-match", "Integer rule values set to number comparison: match" ], [ "rule-int-as-number-no-match", "Integer rule values set to number comparison: no match" ], [ "rule-int-parse-fail-default-str-match", "Integer rule values fail to parse, default to string " "comparison: match" ], [ "rule-int-parse-fail-default-str-no-match", "Integer rule values fail to parse, default to string " "comparison: no match" ], ], [ [ "order1", "Order start 1" ], [ "order2", "Order start 2" ], [ "order3", "Order stop" ], [ "order4", "Order (multiple)" ], [ "order5", "Order (move)" ], [ "order6", "Order (move w/ restart)" ], [ "order7", "Order (mandatory)" ], [ "order-optional", "Order (score=0)" ], [ "order-required", "Order (score=INFINITY)" ], [ "bug-lf-2171", "Prevent group start when clone is stopped" ], [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ], [ "order-sets", "Ordering for resource sets" ], [ "order-serialize", "Serialize resources without inhibiting migration" ], [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ], [ "clone-order-primitive", "Order clone start after a primitive" ], [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ], [ "order-optional-keyword", "Order (optional keyword)" ], [ "order-mandatory", "Order (mandatory keyword)" ], [ "bug-lf-2493", "Don't imply colocation requirements when applying ordering constraints with clones" ], [ 
"ordered-set-basic-startup", "Constraint set with default order settings" ], [ "ordered-set-natural", "Allow natural set ordering" ], [ "order-wrong-kind", "Order (error)" ], ], [ [ "coloc-loop", "Colocation - loop" ], [ "coloc-many-one", "Colocation - many-to-one" ], [ "coloc-list", "Colocation - many-to-one with list" ], [ "coloc-group", "Colocation - groups" ], [ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ], [ "coloc-attr", "Colocation based on node attributes" ], [ "coloc-negative-group", "Negative colocation with a group" ], [ "coloc-intra-set", "Intra-set colocation" ], [ "bug-lf-2435", "Colocation sets with a negative score" ], [ "coloc-clone-stays-active", "Ensure clones don't get stopped/demoted because a dependent must stop" ], [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ], [ "colo_master_w_native", "cl#5070 - Verify promotion order is affected when colocating master to native rsc" ], [ "colo_slave_w_native", "cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ], [ "anti-colocation-order", "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ], [ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ], [ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ], [ "enforce-colo1", "Always enforce B with A INFINITY" ], [ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ], [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ], [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ], [ "colocation-influence", "Respect colocation influence" ], ], [ [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ], [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ], [ "rsc-sets-clone", "Resource Sets - Clone" ], [ "rsc-sets-master", "Resource Sets - Master" ], [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ], ], [ [ "attrs1", "string: eq (and)" ], [ "attrs2", "string: lt / gt (and)" ], [ "attrs3", "string: ne (or)" ], [ "attrs4", "string: exists" ], [ "attrs5", "string: not_exists" ], [ "attrs6", "is_dc: true" ], [ "attrs7", "is_dc: false" ], [ "attrs8", "score_attribute" ], [ "per-node-attrs", "Per node resource parameters" ], ], [ [ "mon-rsc-1", "Schedule Monitor - start" ], [ "mon-rsc-2", "Schedule Monitor - move" ], [ "mon-rsc-3", "Schedule Monitor - pending start" ], [ "mon-rsc-4", "Schedule Monitor - move/pending start" ], ], [ [ "rec-rsc-0", "Resource Recover - no start" ], [ "rec-rsc-1", "Resource Recover - start" ], [ "rec-rsc-2", "Resource Recover - monitor" ], [ "rec-rsc-3", "Resource Recover - stop - ignore" ], [ "rec-rsc-4", "Resource Recover - stop - block" ], [ "rec-rsc-5", "Resource Recover - stop - fence" ], [ "rec-rsc-6", "Resource Recover - multiple - restart" ], [ "rec-rsc-7", "Resource Recover - multiple - stop" ], [ "rec-rsc-8", "Resource Recover - multiple - block" ], [ "rec-rsc-9", "Resource Recover - group/group" ], [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ], [ "stop-failure-no-quorum", "Stop failure without quorum" ], [ "stop-failure-no-fencing", "Stop failure without fencing available" ], [ "stop-failure-with-fencing", "Stop failure with fencing available" ], [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ], [ "multiple-monitor-one-failed", "Consider 
resource failed if any of the configured monitor operations failed" ], ], [ [ "quorum-1", "No quorum - ignore" ], [ "quorum-2", "No quorum - freeze" ], [ "quorum-3", "No quorum - stop" ], [ "quorum-4", "No quorum - start anyway" ], [ "quorum-5", "No quorum - start anyway (group)" ], [ "quorum-6", "No quorum - start anyway (clone)" ], [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ], [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ], [ "suicide-not-needed-initial-quorum", "no-quorum-policy=suicide: suicide not necessary at initial quorum" ], [ "suicide-not-needed-never-quorate", "no-quorum-policy=suicide: suicide not necessary if never quorate" ], [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ], ], [ [ "rec-node-1", "Node Recover - Startup - no fence" ], [ "rec-node-2", "Node Recover - Startup - fence" ], [ "rec-node-3", "Node Recover - HA down - no fence" ], [ "rec-node-4", "Node Recover - HA down - fence" ], [ "rec-node-5", "Node Recover - CRM down - no fence" ], [ "rec-node-6", "Node Recover - CRM down - fence" ], [ "rec-node-7", "Node Recover - no quorum - ignore" ], [ "rec-node-8", "Node Recover - no quorum - freeze" ], [ "rec-node-9", "Node Recover - no quorum - stop" ], [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ], [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ], [ "rec-node-12", "Node Recover - nothing active - fence" ], [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ], [ "rec-node-15", "Node Recover - unknown lrm section" ], [ "rec-node-14", "Serialize all stonith's" ], ], [ [ "multi1", "Multiple Active (stop/start)" ], ], [ [ "migrate-begin", "Normal migration" ], [ "migrate-success", "Completed migration" ], [ "migrate-partial-1", "Completed migration, missing stop on source" ], [ "migrate-partial-2", "Successful migrate_to only" ], [ "migrate-partial-3", "Successful migrate_to only, target down" ], [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ], [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ], [ "migrate-fail-2", "Failed migrate_from" ], [ "migrate-fail-3", "Failed migrate_from + stop on source" ], [ "migrate-fail-4", "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ], [ "migrate-fail-6", "Failed migrate_to" ], [ "migrate-fail-7", "Failed migrate_to + stop on source" ], [ "migrate-fail-8", "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ], [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ], [ "migrate-stop", "Migration in a stopping stack" ], [ "migrate-start", "Migration in a starting stack" ], [ "migrate-stop_start", "Migration in a restarting stack" ], [ "migrate-stop-complex", "Migration in a complex stopping stack" ], [ "migrate-start-complex", "Migration in a complex starting stack" ], [ "migrate-stop-start-complex", "Migration in a complex moving stack" ], [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ], [ "migrate-1", "Migrate (migrate)" ], [ "migrate-2", "Migrate (stable)" ], [ "migrate-3", "Migrate (failed migrate_to)" ], [ "migrate-4", "Migrate (failed migrate_from)" ], [ "novell-252693", "Migration in a stopping stack" ], [ "novell-252693-2", "Migration in a starting 
stack" ], [ "novell-252693-3", "Non-Migration in a starting and stopping stack" ], [ "bug-1820", "Migration in a group" ], [ "bug-1820-1", "Non-migration in a group" ], [ "migrate-5", "Primitive migration with a clone" ], [ "migrate-fencing", "Migration after Fencing" ], [ "migrate-both-vms", "Migrate two VMs that have no colocation" ], [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ], [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ], [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ], [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ], [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ], [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ], [ "6-migrate-group", "Advanced migrate logic, migrate a group" ], [ "7-migrate-group-one-unmigratable", "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ], [ "8-am-then-bm-a-migrating-b-stopping", "Advanced migrate logic, A then B, A migrating, B stopping" ], [ "9-am-then-bm-b-migrating-a-stopping", "Advanced migrate logic, A then B, B migrate, A stopping" ], [ "10-a-then-bm-b-move-a-clone", "Advanced migrate logic, A clone then B, migrate B while stopping A" ], [ "11-a-then-bm-b-move-a-clone-starting", "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ], [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ], [ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ], # @TODO: If pacemaker implements versioned attributes, uncomment this test #[ "migrate-versioned", "Disable migration for versioned resources" ], [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ], ], [ [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ], [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ], [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ], [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ], [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ], [ "inc0", "Incarnation start" ], [ "inc1", "Incarnation start order" ], [ "inc2", "Incarnation silent restart, stop, move" ], [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ], [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ], [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ], [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ], [ "inc7", "Clone colocation" ], [ "inc8", "Clone anti-colocation" ], [ "inc9", "Non-unique clone" ], [ "inc10", "Non-unique clone (stop)" ], [ "inc11", "Primitive colocation with clones" ], [ "inc12", "Clone shutdown" ], [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], [ "bug-lf-2160", "Don't shuffle clones due to colocation" ], [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ], [ "bug-lf-2153", "Clone ordering constraints" ], [ 
"bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ], [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ], [ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ], [ "clone-colocate-instance-2", "Colocation with a specific clone instance" ], [ "clone-order-instance", "Ordering with specific clone instances" ], [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ], [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ], [ "bug-lf-2544", "Balanced clone placement" ], [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ], [ "bug-lf-2574", "Avoid clone shuffle" ], [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ], [ "bug-cl-5168", "Don't shuffle clones" ], [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ], [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ], [ "clone-interleave-1", "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-3", "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ], [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ], [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ], [ "clone-requires-quorum", "Clone with requires=quorum with presumed-inactive instance on failed node" ], ], [ [ "cloned_start_one", "order first clone then clone... first clone_min=2" ], [ "cloned_start_two", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ], [ "clone_min_interleave_start_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_start_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_two", "order first clone then primitive... 
first clone_min=2" ], ], [ [ "unfence-startup", "Clean unfencing" ], [ "unfence-definition", "Unfencing when the agent changes" ], [ "unfence-parameters", "Unfencing when the agent parameters changes" ], [ "unfence-device", "Unfencing when a cluster has only fence devices" ], ], [ [ "master-0", "Stopped -> Slave" ], [ "master-1", "Stopped -> Promote" ], [ "master-2", "Stopped -> Promote : notify" ], [ "master-3", "Stopped -> Promote : master location" ], [ "master-4", "Started -> Promote : master location" ], [ "master-5", "Promoted -> Promoted" ], [ "master-6", "Promoted -> Promoted (2)" ], [ "master-7", "Promoted -> Fenced" ], [ "master-8", "Promoted -> Fenced -> Moved" ], [ "master-9", "Stopped + Promotable + No quorum" ], [ "master-10", "Stopped -> Promotable : notify with monitor" ], [ "master-11", "Stopped -> Promote : colocation" ], [ "novell-239082", "Demote/Promote ordering" ], [ "novell-239087", "Stable master placement" ], [ "master-12", "Promotion based solely on rsc_location constraints" ], [ "master-13", "Include preferences of colocated resources when placing master" ], [ "master-demote", "Ordering when actions depends on demoting a slave resource" ], [ "master-ordering", "Prevent resources from starting that need a master" ], [ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ], [ "master-group", "Promotion of cloned groups" ], [ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ], [ "master-failed-demote", "Don't retry failed demote actions" ], [ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ], [ "master-depend", "Ensure resources that depend on the master don't get allocated until the master does" ], [ "master-reattach", "Re-attach to a running master" ], [ "master-allow-start", "Don't include master score if it would prevent allocation" ], [ "master-colocation", "Allow master instances placemaker to be influenced by colocation constraints" ], [ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ], [ "master-role", "Prevent target-role from promoting more than master-max instances" ], [ "bug-lf-2358", "Master-Master anti-colocation" ], [ "master-promotion-constraint", "Mandatory master colocation constraints" ], [ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ], [ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ], [ "master-demote-2", "Demote does not clear past failure" ], [ "master-move", "Move master based on failure of colocated group" ], [ "master-probed-score", "Observe the promotion score of probed resources" ], [ "colocation_constraint_stops_master", "cl#5054 - Ensure master is demoted when stopped by colocation constraint" ], [ "colocation_constraint_stops_slave", "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ], [ "order_constraint_stops_master", "cl#5054 - Ensure master is demoted when stopped by order constraint" ], [ "order_constraint_stops_slave", "cl#5054 - Ensure slave is not demoted when stopped by order constraint" ], [ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ], [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ], [ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ], [ "master-demote-block", "Block promotion if demote fails with on-fail=block" ], [ "master-dependent-ban", "Don't stop instances from being active because a dependent is banned from that host" ], [ 
"master-stop", "Stop instances due to location constraint with role=Started" ], [ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ], [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ], [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ], [ "master-asymmetrical-order", "Fix the behaviors of multi-state resources with asymmetrical ordering" ], [ "master-notify", "Master promotion with notifies" ], [ "master-score-startup", "Use permanent master scores without LRM history" ], [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ], [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ], [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ], ], [ [ "history-1", "Correctly parse stateful-1 resource state" ], ], [ [ "managed-0", "Managed (reference)" ], [ "managed-1", "Not managed - down" ], [ "managed-2", "Not managed - up" ], [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ], [ "bug-5028-detach", "Ensure detach still works" ], [ "bug-5028-bottom", "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ], [ "unmanaged-stop-1", "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ], [ "unmanaged-stop-2", "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ], [ "unmanaged-stop-3", "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ], [ "unmanaged-stop-4", "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ], [ "unmanaged-block-restart", "Block restart of resources if any dependent resource in a group is unmanaged" ], ], [ [ "interleave-0", "Interleave (reference)" ], [ "interleave-1", "coloc - not interleaved" ], [ "interleave-2", "coloc - interleaved" ], [ "interleave-3", "coloc - interleaved (2)" ], [ "interleave-pseudo-stop", "Interleaved clone during stonith" ], [ "interleave-stop", "Interleaved clone during stop" ], [ "interleave-restart", "Interleaved clone during dependency restart" ], ], [ [ "notify-0", "Notify reference" ], [ "notify-1", "Notify simple" ], [ "notify-2", "Notify simple, confirm" ], [ "notify-3", "Notify move, confirm" ], [ "novell-239079", "Notification priority" ], #[ "notify-2", "Notify - 764" ], [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ], [ "route-remote-notify", "Route remote notify actions through correct cluster node" ], [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ], ], [ [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ], [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ], [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ], [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 
_after_ a stop" ], [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ], [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ], [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ], [ "829", "OSDL #829" ], [ "994", "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ], [ "994-2", "OSDL #994 - with a dependent resource" ], [ "1360", "OSDL #1360 - Clone stickiness" ], [ "1484", "OSDL #1484 - on_fail=stop" ], [ "1494", "OSDL #1494 - Clone stability" ], [ "unrunnable-1", "Unrunnable" ], [ "unrunnable-2", "Unrunnable 2" ], [ "stonith-0", "Stonith loop - 1" ], [ "stonith-1", "Stonith loop - 2" ], [ "stonith-2", "Stonith loop - 3" ], [ "stonith-3", "Stonith startup" ], [ "stonith-4", "Stonith node state" ], [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ], [ "bug-1572-1", "Recovery of groups depending on master/slave" ], [ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ], [ "bug-1685", "Depends-on-master ordering" ], [ "bug-1822", "Don't promote partially active groups" ], [ "bug-pm-11", "New resource added to a m/s group" ], [ "bug-pm-12", "Recover only the failed portion of a cloned group" ], [ "bug-n-387749", "Don't shuffle clone instances" ], [ "bug-n-385265", "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ], [ "bug-n-385265-2", "Ensure groups are migrated instead of remaining partially active on the current node" ], [ "bug-lf-1920", "Correctly handle probes that find active resources" ], [ "bnc-515172", "Location constraint with multiple expressions" ], [ "colocate-primitive-with-clone", "Optional colocation with a clone" ], [ "use-after-free-merge", "Use-after-free in native_merge_weights" ], [ "bug-lf-2551", "STONITH ordering for stop" ], [ "bug-lf-2606", "Stonith implies demote" ], [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ], [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ], [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ], [ "bug-5014-A-stop-B-started", "Verify when A stops B does not stop if it has already started using symmetric=false" ], [ "bug-5014-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ], [ "bug-5014-CthenAthenB-C-stopped", "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ], [ "bug-5014-CLONE-A-start-B-start", "Verify when A starts B starts using clone resources with symmetric=false" ], [ "bug-5014-CLONE-A-stop-B-started", "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ], [ "bug-5014-GROUP-A-start-B-start", "Verify when A starts B starts when using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-started", "Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ], [ "bug-5014-ordered-set-symmetrical-false", "Verify ordered sets work with symmetrical=false" ], [ "bug-5014-ordered-set-symmetrical-true", "Verify ordered sets work with symmetrical=true" ], [ "bug-5007-masterslave_colocation", "Verify use of 
colocation scores other than INFINITY and -INFINITY work on multi-state resources" ], [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ], [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ], [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ], [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ], [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ], [ "failcount", "Ensure failcounts are correctly expired" ], [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ], [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ], [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ], [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ], [ "monitor-onfail-stop", "bug-5058 - Monitor failure with on-fail set to stop" ], [ "bug-5059", "No need to restart p_stateful1:*" ], [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ], [ "bug-5069-op-disabled", "Test on-fail=ignore with failure when monitor is disabled" ], [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ], [ "expire-non-blocked-failure", "Ignore failure-timeout only if the failed operation has on-fail=block" ], [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ], [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ], [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ], [ "order-expired-failure", "Order failcount cleanup after remote fencing" ], [ "ignore_stonith_rsc_order1", "cl#5056 - Ignore order constraint between stonith and non-stonith rsc" ], [ "ignore_stonith_rsc_order2", "cl#5056 - Ignore order constraint with group rsc containing mixed stonith and non-stonith" ], [ "ignore_stonith_rsc_order3", "cl#5056 - Ignore order constraint, stonith clone and mixed group" ], [ "ignore_stonith_rsc_order4", "cl#5056 - Ignore order constraint, stonith clone and clone with nested mixed group" ], [ "honor_stonith_rsc_order1", "cl#5056 - Honor order constraint, stonith clone and pure stonith group (single rsc)" ], [ "honor_stonith_rsc_order2", "cl#5056 - Honor order constraint, stonith clone and pure stonith group (multiple rsc)" ], [ "honor_stonith_rsc_order3", "cl#5056 - Honor order constraint, stonith clones with nested pure stonith group" ], [ "honor_stonith_rsc_order4", "cl#5056 - Honor order constraint, between two native stonith rscs" ], [ "multiply-active-stonith", "Multiply active stonith" ], [ "probe-timeout", "cl#5099 - Default probe timeout" ], [ "order-first-probes", "cl#5301 - Respect order constraints when relevant resources are being probed" ], [ "concurrent-fencing", "Allow performing fencing operations in parallel" ], [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ], ], [ [ "systemhealth1", "System Health () #1" ], [ "systemhealth2", "System Health () #2" ], [ "systemhealth3", "System Health () #3" ], [ "systemhealthn1", "System Health (None) #1" ], [ "systemhealthn2", "System Health (None) #2" ], [ "systemhealthn3", "System Health (None) #3" ], [ "systemhealthm1", "System Health (Migrate On Red) #1" ], [ "systemhealthm2", "System Health (Migrate On Red) #2" ], [ "systemhealthm3", "System
Health (Migrate On Red) #3" ], [ "systemhealtho1", "System Health (Only Green) #1" ], [ "systemhealtho2", "System Health (Only Green) #2" ], [ "systemhealtho3", "System Health (Only Green) #3" ], [ "systemhealthp1", "System Health (Progressive) #1" ], [ "systemhealthp2", "System Health (Progressive) #2" ], [ "systemhealthp3", "System Health (Progressive) #3" ], ], [ [ "utilization", "Placement Strategy - utilization" ], [ "minimal", "Placement Strategy - minimal" ], [ "balanced", "Placement Strategy - balanced" ], ], [ [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ], [ "placement-priority", "Optimized Placement Strategy - priority" ], [ "placement-location", "Optimized Placement Strategy - location" ], [ "placement-capacity", "Optimized Placement Strategy - capacity" ], ], [ [ "utilization-order1", "Utilization Order - Simple" ], [ "utilization-order2", "Utilization Order - Complex" ], [ "utilization-order3", "Utilization Order - Migrate" ], [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ], [ "utilization-shuffle", "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ], [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ], [ "load-stopped-loop-2", "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ], ], [ [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ], [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ], [ "colocated-utilization-group", "Colocated Utilization - Group" ], [ "colocated-utilization-clone", "Colocated Utilization - Clone" ], [ "utilization-check-allowed-nodes", "Only check the capacities of the nodes that can run the resource" ], ], [ [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ], [ "node-maintenance-1", "cl#5128 - Node maintenance" ], [ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ], [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ], [ "rsc-maintenance", "Per-resource maintenance" ], ], [ [ "not-installed-agent", "The resource agent is missing" ], [ "not-installed-tools", "Something the resource agent needs is missing" ], ], [ [ "stopped-monitor-00", "Stopped Monitor - initial start" ], [ "stopped-monitor-01", "Stopped Monitor - failed started" ], [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ], [ "stopped-monitor-03", "Stopped Monitor - stop started" ], [ "stopped-monitor-04", "Stopped Monitor - failed stop" ], [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ], [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ], [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ], [ "stopped-monitor-08", "Stopped Monitor - migrate" ], [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ], [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ], [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ], [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ], [ "stopped-monitor-20", "Stopped Monitor - initial stop" ], [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ], [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ], [ "stopped-monitor-23", "Stopped Monitor - start stopped" ], [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ], [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped
multi-up" ], [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ], [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ], [ "stopped-monitor-30", "Stopped Monitor - new node started" ], [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ], ], [ # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on right nodes [ "intervals", "Recurring monitor interval handling" ], ], [ [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ], [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ], [ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ], [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ], [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ], [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ], [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ], [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ], [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ], [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ], [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ], [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ], [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ], [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ], [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ], [ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ], [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ], [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ], [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ], [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ], [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ], [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ], [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ], [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ], [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ], [ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ], [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ], [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ], [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ], [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ], [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ], [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ], [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ], [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ], [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ], [ 
"ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ], [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ], [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ], [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ], [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ], [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ], [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ], [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ], [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ], [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ], [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ], [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ], [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ], [ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ], [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ], [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ], [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ], [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ], [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ], [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ], [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ], [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ], [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ], [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ], [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ], [ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ], [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ], [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ], [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ], [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ], [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ], [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ], [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ], [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ], [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ], [ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ], [ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ], [ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ], [ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ], [ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ], [ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ], [ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ], [ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ], [ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ], [ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ], [ "ticket-master-12", 
"Ticket - Master (loss-policy=freeze, revoked)" ], [ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ], [ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ], [ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ], [ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ], [ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ], [ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ], [ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ], [ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ], [ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ], [ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ], [ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ], [ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ], [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ], [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ], [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ], [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ], [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ], [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ], [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ], [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ], [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ], [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ], [ "site-specific-params", "Site-specific instance attributes based on rules" ], ], [ [ "template-1", "Template - 1" ], [ "template-2", "Template - 2" ], [ "template-3", "Template - 3 (merge operations)" ], [ "template-coloc-1", "Template - Colocation 1" ], [ "template-coloc-2", "Template - Colocation 2" ], [ "template-coloc-3", "Template - Colocation 3" ], [ "template-order-1", "Template - Order 1" ], [ "template-order-2", "Template - Order 2" ], [ "template-order-3", "Template - Order 3" ], [ "template-ticket", "Template - Ticket" ], [ "template-rsc-sets-1", "Template - Resource Sets 1" ], [ "template-rsc-sets-2", "Template - Resource Sets 2" ], [ "template-rsc-sets-3", "Template - Resource Sets 3" ], [ "template-rsc-sets-4", "Template - Resource Sets 4" ], [ "template-clone-primitive", "Cloned primitive from template" ], [ "template-clone-group", "Cloned group from template" ], [ "location-sets-templates", "Resource sets and templates - Location" ], [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ], [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ], [ "tags-location", "Tags - Location" ], [ "tags-ticket", "Tags - Ticket" ], ], [ [ "container-1", "Container - initial" ], [ "container-2", "Container - monitor failed" ], [ "container-3", "Container - stop failed" ], [ "container-4", "Container - 
reached migration-threshold" ], [ "container-group-1", "Container in group - initial" ], [ "container-group-2", "Container in group - monitor failed" ], [ "container-group-3", "Container in group - stop failed" ], [ "container-group-4", "Container in group - reached migration-threshold" ], [ "container-is-remote-node", "Place resource within container when container is remote-node" ], [ "bug-rh-1097457", "Kill user defined container/contents ordering" ], [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ], [ "bundle-order-startup", "Bundle startup ordering" ], [ "bundle-order-partial-start", "Bundle startup ordering when some dependencies are already running" ], [ "bundle-order-partial-start-2", "Bundle startup ordering when some dependencies and the container are already running" ], [ "bundle-order-stop", "Bundle stop ordering" ], [ "bundle-order-partial-stop", "Bundle stop ordering when some dependencies are already stopped" ], [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ], [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ], [ "bundle-order-startup-clone-2", "Bundle startup with clones" ], [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ], [ "bundle-nested-colocation", "Colocation of nested connection resources" ], [ "bundle-order-fencing", "Order pseudo bundle fencing after parent node fencing if both are happening" ], [ "bundle-probe-order-1", "order 1" ], [ "bundle-probe-order-2", "order 2" ], [ "bundle-probe-order-3", "order 3" ], [ "bundle-probe-remotes", "Ensure remotes get probed too" ], [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ], [ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ], ], [ [ "whitebox-fail1", "Fail whitebox container rsc" ], [ "whitebox-fail2", "Fail cluster connection to guest node" ], [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ], [ "whitebox-start", "Start whitebox container with resources assigned to it" ], [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ], [ "whitebox-move", "Move whitebox container with resources assigned to it" ], [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ], [ "whitebox-ms-ordering", "Verify promote/demote cannot occur before connection is established" ], [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ], [ "whitebox-orphaned", "Properly shut down orphaned whitebox container" ], [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ], [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ], [ "whitebox-migrate1", "Migrate both container and connection resource" ], [ "whitebox-imply-stop-on-fence", "Imply stop action on container node rsc when host node is fenced" ], [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ], [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ], [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ], [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ], ], [ [ "remote-startup-probes", "Baremetal remote-node startup probes" ], [ "remote-startup", "Start a newly discovered remote-node with no status" ], [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ], [ "remote-fence-unclean2", "Fence
baremetal remote-node after cluster node fails and connection cannot be recovered" ], [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ], [ "remote-move", "Move remote-node connection resource" ], [ "remote-disable", "Disable a baremetal remote-node" ], [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ], [ "remote-orphaned", "Properly shut down orphaned connection resource" ], [ "remote-orphaned2", "Verify we can handle orphaned remote connections with active resources on the remote" ], [ "remote-recover", "Recover connection resource after cluster-node fails" ], [ "remote-stale-node-entry", "Make sure we properly handle leftover remote-node entries in the node section" ], [ "remote-partial-migrate", "Make sure partial migrations are handled before ops on the remote node" ], [ "remote-partial-migrate2", "Make sure partial migration target is preferred for remote connection" ], [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ], [ "remote-start-fail", "Make sure a start failure does not result in fencing if no active resources are on remote" ], [ "remote-unclean2", "Make sure monitor failure always results in fencing, even if no rsc are active on remote" ], [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ], [ "remote-recovery", "Recover remote connections before attempting demotion" ], [ "remote-recover-connection", "Optimistic recovery of only the connection" ], [ "remote-recover-all", "Fencing when the connection has no home" ], [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ], [ "remote-recover-unknown", "Fencing when the connection has no home and the remote has no operation history" ], [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ], [ "remote-connection-unrecoverable", "Remote connection host must be fenced, with connection unrecoverable" ], [ "cancel-behind-moving-remote", "Route recurring monitor cancellations through original node of a moving remote connection" ], ], [ [ "resource-discovery", "Exercises resource-discovery location constraint option" ], [ "rsc-discovery-per-node", "Disable resource discovery per node" ], [ "shutdown-lock", "Ensure shutdown lock works properly" ], [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], ], [ [ "op-defaults", "Test op_defaults conditional expressions" ], [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], [ "op-defaults-3", "Test op_defaults precedence" ], [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], ], [ [ "stop-all-resources", "Test stop-all-resources=true" ], ], [ [ "ocf_degraded-remap-ocf_ok", "Test DEGRADED remapped to OK" ], [ "ocf_degraded_master-remap-ocf_ok", "Test DEGRADED_MASTER remapped to OK" ], ], # @TODO: If pacemaker implements versioned attributes, uncomment these tests #[ # [ "versioned-resources", "Start resources with #ra-version rules" ], # [ "restart-versioned", "Restart resources on #ra-version change" ], # [ "reload-versioned", "Reload resources on #ra-version change" ], #], #[ # [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ], # [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ], # [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ],
# [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ], #], ] TESTS_64BIT = [ [ [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ], ], ] # Constants substituted in the build process class BuildVars(object): SBINDIR = "@sbindir@" BUILDDIR = "@abs_top_builddir@" CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@" # These values must be kept in sync with include/crm/crm.h class CrmExit(object): OK = 0 ERROR = 1 NOT_INSTALLED = 5 NOINPUT = 66 def is_executable(path): """ Check whether a file at a given path is executable. """ try: return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR except OSError: return False def diff(file1, file2, **kwargs): """ Call diff on two files """ return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space", "--ignore-blank-lines", file1, file2 ], **kwargs) def sort_file(filename): """ Sort a file alphabetically """ with io.open(filename, "rt") as f: lines = sorted(f) with io.open(filename, "wt") as f: f.writelines(lines) def remove_files(filenames): """ Remove a list of files """ for filename in filenames: try: os.remove(filename) except OSError: pass def normalize(filename): """ Remove text from a file that isn't important for comparison """ if not hasattr(normalize, "patterns"): normalize.patterns = [ re.compile(r'crm_feature_set="[^"]*"'), re.compile(r'batch-limit="[0-9]*"') ] if os.path.isfile(filename): with io.open(filename, "rt") as f: lines = f.readlines() with io.open(filename, "wt") as f: for line in lines: for pattern in normalize.patterns: line = pattern.sub("", line) f.write(line) def cat(filename, dest=sys.stdout): """ Copy a file to a destination file descriptor """ with io.open(filename, "rt") as f: shutil.copyfileobj(f, dest) class CtsScheduler(object): """ Regression tests for Pacemaker's scheduler """ def _parse_args(self, argv): """ Parse command-line arguments """ parser = argparse.ArgumentParser(description=DESC) parser.add_argument('-V', '--verbose', action='count', help='Display any differences from expected output') parser.add_argument('--run', metavar='TEST', help=('Run only single specified test (any further ' 'arguments will be passed to crm_simulate)')) parser.add_argument('--update', action='store_true', help='Update expected results with actual results') parser.add_argument('-b', '--binary', metavar='PATH', help='Specify path to crm_simulate') parser.add_argument('-i', '--io-dir', metavar='PATH', help='Specify path to regression test data directory') parser.add_argument('-o', '--out-dir', metavar='PATH', help='Specify where intermediate and output files should go') parser.add_argument('-v', '--valgrind', action='store_true', help='Run all commands under valgrind') parser.add_argument('--valgrind-dhat', action='store_true', help='Run all commands under valgrind with heap analyzer') parser.add_argument('--valgrind-skip-output', action='store_true', help='If running under valgrind, do not display output') parser.add_argument('--testcmd-options', metavar='OPTIONS', default='', help='Additional options for command under test') # argparse can't handle "everything after --run TEST", so grab that self.single_test_args = [] narg = 0 for arg in argv: narg = narg + 1 if arg == '--run': (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:]) break self.args = parser.parse_args(argv[1:]) def _error(self, s): print(" * ERROR: %s" % s) def _failed(self, s): print(" * FAILED: %s" % s) def _get_valgrind_cmd(self): """ Return command arguments needed (or not) to run valgrind 
""" if self.args.valgrind: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "-q", "--gen-suppressions=all", "--time-stamp=yes", "--trace-children=no", "--show-reachable=no", "--leak-check=full", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home) ] if self.args.valgrind_dhat: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "--tool=exp-dhat", "--time-stamp=yes", "--trace-children=no", "--show-top-n=100", "--num-callers=4" ] return [] def _get_simulator_cmd(self): """ Locate the simulation binary """ if self.args.binary is None: self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate" if not is_executable(self.args.binary): self.args.binary = BuildVars.SBINDIR + "/crm_simulate" if not is_executable(self.args.binary): # @TODO it would be more pythonic to raise an exception self._error("Test binary " + self.args.binary + " not found") sys.exit(CrmExit.NOT_INSTALLED) return [ self.args.binary ] + shlex.split(self.args.testcmd_options) def set_schema_env(self): """ Ensure schema directory environment variable is set, if possible """ try: return os.environ['PCMK_schema_directory'] except KeyError: for d in [ os.path.join(BuildVars.BUILDDIR, "xml"), BuildVars.CRM_SCHEMA_DIRECTORY ]: if os.path.isdir(d): os.environ['PCMK_schema_directory'] = d return d return None def __init__(self, argv=sys.argv): self._parse_args(argv) # Where this executable lives self.test_home = os.path.dirname(os.path.realpath(argv[0])) # Where test data resides if self.args.io_dir is None: self.args.io_dir = os.path.join(self.test_home, "scheduler") # Where to store generated files if self.args.out_dir is None: self.args.out_dir = self.args.io_dir self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff") else: self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff") os.environ['CIB_shadow_dir'] = self.args.out_dir self.failed_file = None # Single test mode (if requested) try: # User can give test base name or file name of a test input self.args.run = os.path.splitext(os.path.basename(self.args.run))[0] except (AttributeError, TypeError): pass # --run was not specified self.set_schema_env() # Arguments needed (or not) to run commands self.valgrind_args = self._get_valgrind_cmd() self.simulate_args = self._get_simulator_cmd() # Test counters self.num_failed = 0 self.num_tests = 0 def _compare_files(self, filename1, filename2): """ Add any file differences to failed results """ - with io.open("/dev/null", "wt") as dev_null: - if diff(filename1, filename2, stdout=dev_null) != 0: - diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null) - self.failed_file.write("\n"); - return True + if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0: + diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL) + self.failed_file.write("\n"); + return True return False def run_one(self, test_name, test_desc, test_args=[]): """ Run one scheduler test """ print(" Test %-25s %s" % ((test_name + ":"), test_desc)) did_fail = False self.num_tests = self.num_tests + 1 # Test inputs input_filename = "%s/%s.xml" % (self.args.io_dir, test_name) expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name) dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name) scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name) summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name) stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name) # (Intermediate) test outputs 
output_filename = "%s/%s.out" % (self.args.out_dir, test_name) dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name) score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name) summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name) stderr_output_filename = "%s/%s.stderr.pe" % (self.args.out_dir, test_name) valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name) # Common arguments for running test test_cmd = [] if self.valgrind_args: test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ] test_cmd = test_cmd + self.simulate_args # @TODO It would be more pythonic to raise exceptions for errors, # then perhaps it would be nice to make a single-test class # Ensure necessary test inputs exist if not os.path.isfile(input_filename): self._error("No input") self.num_failed = self.num_failed + 1 return CrmExit.NOINPUT if not self.args.update and not os.path.isfile(expected_filename): self._error("no stored output") return CrmExit.NOINPUT # Run simulation to generate summary output if self.args.run: # Single test mode test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args print(" ".join(test_cmd_full)) else: # @TODO Why isn't test_args added here? test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] with io.open(summary_output_filename, "wt") as f: simulation = subprocess.Popen(test_cmd_full, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ) # This makes diff happy regardless of PCMK__COMPAT_2_0 sed = subprocess.Popen(["sed", "-e", "s/ocf::/ocf:/g"], stdin=simulation.stdout, stdout=f, stderr=subprocess.STDOUT) simulation.stdout.close() sed.communicate() if self.args.run: cat(summary_output_filename) # Re-run simulation to generate dot, graph, and scores test_cmd_full = test_cmd + [ '-x', input_filename, '-D', dot_output_filename, '-G', output_filename, '-sSQ' ] + test_args with io.open(stderr_output_filename, "wt") as f_stderr, \ io.open(score_output_filename, "wt") as f_score: rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ) # Check for test command failure if rc != CrmExit.OK: self._failed("Test returned: %d" % rc) did_fail = True print(" ".join(test_cmd_full)) # Check for valgrind errors if self.valgrind_args and not self.args.valgrind_skip_output: if os.stat(valgrind_output_filename).st_size > 0: self._failed("Valgrind reported errors") did_fail = True cat(valgrind_output_filename) remove_files([ valgrind_output_filename ]) # Check for core dump if os.path.isfile("core"): self._failed("Core-file detected: core." 
+ test_name) did_fail = True os.rename("core", "%s/core.%s" % (self.test_home, test_name)) # Check any stderr output if os.path.isfile(stderr_expected_filename): if self._compare_files(stderr_expected_filename, stderr_output_filename): self._failed("stderr changed") did_fail = True elif os.stat(stderr_output_filename).st_size > 0: self._failed("Output was written to stderr") did_fail = True cat(stderr_output_filename) remove_files([ stderr_output_filename ]) # Check whether output graph exists, and normalize it if (not os.path.isfile(output_filename) or os.stat(output_filename).st_size == 0): self._error("No graph produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ output_filename ]) return CrmExit.ERROR normalize(output_filename) # Check whether dot output exists, and sort it if (not os.path.isfile(dot_output_filename) or os.stat(dot_output_filename).st_size == 0): self._error("No dot-file summary produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ dot_output_filename, output_filename ]) return CrmExit.ERROR with io.open(dot_output_filename, "rt") as f: first_line = f.readline() # "digraph" line with opening brace lines = f.readlines() last_line = lines[-1] # closing brace del lines[-1] lines = sorted(set(lines)) # unique sort with io.open(dot_output_filename, "wt") as f: f.write(first_line) f.writelines(lines) f.write(last_line) # Check whether score output exists, and sort it if (not os.path.isfile(score_output_filename) or os.stat(score_output_filename).st_size == 0): self._error("No allocation scores produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ score_output_filename, output_filename ]) return CrmExit.ERROR else: sort_file(score_output_filename) if self.args.update: shutil.copyfile(output_filename, expected_filename) shutil.copyfile(dot_output_filename, dot_expected_filename) shutil.copyfile(score_output_filename, scores_filename) shutil.copyfile(summary_output_filename, summary_filename) print(" Updated expected outputs") if self._compare_files(summary_filename, summary_output_filename): self._failed("summary changed") did_fail = True if self._compare_files(dot_expected_filename, dot_output_filename): self._failed("dot-file summary changed") did_fail = True else: remove_files([ dot_output_filename ]) if self._compare_files(expected_filename, output_filename): self._failed("xml-file changed") did_fail = True if self._compare_files(scores_filename, score_output_filename): self._failed("scores-file changed") did_fail = True remove_files([ output_filename, score_output_filename, summary_output_filename]) if did_fail: self.num_failed = self.num_failed + 1 return CrmExit.ERROR return CrmExit.OK def run_all(self): """ Run all defined tests """ if platform.architecture()[0] == "64bit": TESTS.extend(TESTS_64BIT) for group in TESTS: for test in group: try: args = test[2] except IndexError: args = [] self.run_one(test[0], test[1], args) print() def _print_summary(self): """ Print a summary of parameters for this test run """ print("Test home is:\t" + self.test_home) print("Test binary is:\t" + self.args.binary) if 'PCMK_schema_directory' in os.environ: print("Schema home is:\t" + os.environ['PCMK_schema_directory']) if self.valgrind_args != []: print("Activating memory testing with valgrind") print() def _test_results(self): if self.num_failed == 0: return CrmExit.OK if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0: if self.args.verbose: self._error("Results of %d 
failed tests (out of %d):" % (self.num_failed, self.num_tests)) cat(self.failed_filename) else: self._error("Results of %d failed tests (out of %d) are in %s" % (self.num_failed, self.num_tests, self.failed_filename)) self._error("Use -V to display them after running the tests") else: self._error("%d (of %d) tests failed (no diff results)" % (self.num_failed, self.num_tests)) if os.path.isfile(self.failed_filename): os.remove(self.failed_filename) return CrmExit.ERROR def run(self): """ Run test(s) as specified """ self._print_summary() # Zero out the error log self.failed_file = io.open(self.failed_filename, "wt") if self.args.run is None: print("Performing the following tests from " + self.args.io_dir) print() self.run_all() print() self.failed_file.close() rc = self._test_results() else: rc = self.run_one(self.args.run, "Single shot", self.single_test_args) self.failed_file.close() cat(self.failed_filename) return rc if __name__ == "__main__": sys.exit(CtsScheduler().run()) # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/doc/sphinx/Pacemaker_Development/python.rst b/doc/sphinx/Pacemaker_Development/python.rst index 9a828771e3..b97919685a 100644 --- a/doc/sphinx/Pacemaker_Development/python.rst +++ b/doc/sphinx/Pacemaker_Development/python.rst @@ -1,87 +1,81 @@ .. index:: single: Python pair: Python; guidelines Python Coding Guidelines ------------------------ .. index:: pair: Python; boilerplate pair: license; Python pair: copyright; Python .. _s-python-boilerplate: Python Boilerplate ################## If a Python file is meant to be executed (as opposed to imported), it should have a ``.in`` extension, and its first line should be: .. code-block:: python #!@PYTHON@ which will be replaced with the appropriate python executable when Pacemaker is built. To make that happen, add an ``AC_CONFIG_FILES()`` line to ``configure.ac``, and add the file name without ``.in`` to ``.gitignore`` (see existing examples). After the above line if any, every Python file should start like this: .. code-block:: python """<BRIEF DESCRIPTION>""" __copyright__ = "Copyright the Pacemaker project contributors" __license__ = "<LICENSE> WITHOUT ANY WARRANTY" *<BRIEF DESCRIPTION>* is obviously a brief description of the file's purpose. The string may contain any other information typically used in a Python file `docstring `_. ``<LICENSE>`` should follow the policy set forth in the `COPYING `_ file, generally one of "GNU General Public License version 2 or later (GPLv2+)" or "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)". .. index:: single: Python; 3 single: Python; version Python Version Compatibility ############################ -Pacemaker targets compatibility with Python 3.2 and later. +Pacemaker targets compatibility with Python 3.4 and later. -Do not use features not available in all targeted Python versions. Some -examples include: - - * The ``u`` prefix for string literals - * The ``ipaddress`` module - * The ``subprocess.run()`` function - * The ``subprocess.DEVNULL`` constant - * ``subprocess`` module-specific exceptions +Do not use features not available in all targeted Python versions. An +example is the ``subprocess.run()`` function.
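For example, a rough equivalent of ``subprocess.run()`` can be built from
primitives that are available in Python 3.4 (a sketch only; the helper
name is illustrative, not part of any Pacemaker API):

.. code-block:: python

   import subprocess

   def run_checked(args):
       """ Approximates subprocess.run(args, check=True, stdout=PIPE) """
       proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
       (out, err) = proc.communicate()
       if proc.returncode != 0:
           raise subprocess.CalledProcessError(proc.returncode, args)
       return out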
.. index:: pair: Python; whitespace Formatting Python Code ###################### * Indentation must be 4 spaces, no tabs. * Do not leave trailing whitespace. * Lines should be no longer than 80 characters unless limiting line length significantly impacts readability. For Python, this limitation is flexible since breaking a line often impacts readability, but definitely keep it under 120 characters. * Where not conflicting with this style guide, it is recommended (but not required) to follow `PEP 8 `_. * It is recommended (but not required) to format Python code such that ``pylint --disable=line-too-long,too-many-lines,too-many-instance-attributes,too-many-arguments,too-many-statements`` produces minimal complaints (even better if you don't need to disable all those checks). diff --git a/include/portability.h b/include/portability.h index 2750120d43..390cacd542 100644 --- a/include/portability.h +++ b/include/portability.h @@ -1,168 +1,162 @@ /* - * Copyright 2001-2020 the Pacemaker project contributors + * Copyright 2001-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PORTABILITY_H # define PORTABILITY_H # define EOS '\0' # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) /* Needs to be defined before any other includes, otherwise some system * headers do not behave as expected! Major black magic... */ # undef _GNU_SOURCE /* in case it was defined on the command line */ # define _GNU_SOURCE /* Please leave this as the first #include - Solaris needs it there */ # ifdef HAVE_CONFIG_H # ifndef PCMK__CONFIG_H # define PCMK__CONFIG_H # include <config.h> # endif # endif /* Prototypes for libreplace functions */ # ifndef HAVE_DAEMON /* We supply a replacement function, but need a prototype */ int daemon(int nochdir, int noclose); # endif /* HAVE_DAEMON */ # ifndef HAVE_SETENV /* We supply a replacement function, but need a prototype */ int setenv(const char *name, const char *value, int why); # endif /* HAVE_SETENV */ # ifndef HAVE_STRERROR /* We supply a replacement function, but need a prototype */ char *strerror(int errnum); # endif /* HAVE_STRERROR */ # ifndef HAVE_STRCHRNUL /* We supply a replacement function, but need a prototype */ char *strchrnul(const char *s, int c_in); # endif /* HAVE_STRCHRNUL */ # ifndef HAVE_ALPHASORT # include <dirent.h> int alphasort(const void *dirent1, const void *dirent2); # endif /* HAVE_ALPHASORT */ # ifndef HAVE_STRNLEN size_t strnlen(const char *s, size_t maxlen); # else # define USE_GNU # endif # ifndef HAVE_STRNDUP char *strndup(const char *str, size_t len); # else # define USE_GNU # endif # include <glib.h> -# if !GLIB_CHECK_VERSION(2,38,0) -# define g_assert_true(expr) g_assert_cmpint((expr), ==, TRUE) -# define g_assert_false(expr) g_assert_cmpint((expr), !=, TRUE) -# define g_assert_null(expr) g_assert_cmpint((expr) == NULL, ==, TRUE) -# endif - # if SUPPORT_DBUS # ifndef HAVE_DBUSBASICVALUE # include <stdint.h> # include <dbus/dbus.h> /** * An 8-byte struct you could use to access int64 without having * int64 support */ typedef struct { uint32_t first32; /**< first 32 bits in the 8 bytes (beware endian issues) */ uint32_t second32; /**< second 32 bits in the 8 bytes (beware endian issues) */ } DBus8ByteStruct; /** * A simple value union that lets you access bytes as if they * were various types; useful when dealing with basic types via * void pointers and varargs. * * This union also contains a pointer member (which can be used * to retrieve a string from dbus_message_iter_get_basic(), for * instance), so on future platforms it could conceivably be larger * than 8 bytes. */
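/* Illustration only: with the union below, a value obtained through a void
 * pointer can be viewed as any of its members, e.g. for a string-typed
 * iterator (do_something_with() is a placeholder, not a real function):
 *
 *     DBusBasicValue value;
 *
 *     dbus_message_iter_get_basic(&iter, &value);
 *     do_something_with(value.str);
 */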
typedef union { unsigned char bytes[8]; /**< as 8 individual bytes */ int16_t i16; /**< as int16 */ uint16_t u16; /**< as int16 */ int32_t i32; /**< as int32 */ uint32_t u32; /**< as int32 */ uint32_t bool_val; /**< as boolean */ # ifdef DBUS_HAVE_INT64 int64_t i64; /**< as int64 */ uint64_t u64; /**< as int64 */ # endif DBus8ByteStruct eight; /**< as 8-byte struct */ double dbl; /**< as double */ unsigned char byt; /**< as byte */ char *str; /**< as char* (string, object path or signature) */ int fd; /**< as Unix file descriptor */ } DBusBasicValue; # endif # endif /* Replacement error codes for non-linux */ # include <errno.h> # ifndef ENOTUNIQ # define ENOTUNIQ 190 # endif # ifndef ECOMM # define ECOMM 191 # endif # ifndef ELIBACC # define ELIBACC 192 # endif # ifndef EREMOTEIO # define EREMOTEIO 193 # endif # ifndef EUNATCH # define EUNATCH 194 # endif # ifndef ENOKEY # define ENOKEY 195 # endif # ifndef ENODATA # define ENODATA 196 # endif # ifndef ETIME # define ETIME 197 # endif # ifndef ENOSR # define ENOSR 198 # endif # ifndef ENOSTR # define ENOSTR 199 # endif # ifndef EKEYREJECTED # define EKEYREJECTED 200 # endif #endif /* PORTABILITY_H */ diff --git a/lib/Makefile.am b/lib/Makefile.am index e3e927c780..ed5bfa3964 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,34 +1,21 @@ # -# Copyright 2003-2019 the Pacemaker project contributors +# Copyright 2003-2021 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # MAINTAINERCLEANFILES = Makefile.in LIBS = cib lrmd service fencing cluster -PC_FILES = $(LIBS:%=pacemaker-%.pc) \ +pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \ libpacemaker.pc \ pacemaker.pc \ pacemaker-pe_rules.pc \ pacemaker-pe_status.pc -EXTRA_DIST = $(PC_FILES:%=%.in) - -all-local: $(PC_FILES) - -install-exec-local: $(PC_FILES) - $(INSTALL) -d $(DESTDIR)/$(libdir)/pkgconfig - $(INSTALL) -m 644 $(PC_FILES) $(DESTDIR)/$(libdir)/pkgconfig - -uninstall-local: - -cd $(DESTDIR)/$(libdir)/pkgconfig && rm -f $(PC_FILES) - -rmdir $(DESTDIR)/$(libdir)/pkgconfig 2> /dev/null - -clean-local: - -rm -f $(PC_FILES) +EXTRA_DIST = $(pkgconfig_DATA:%=%.in) SUBDIRS = gnu common pengine cib services fencing lrmd cluster pacemaker diff --git a/mk/common.mk b/mk/common.mk index 578f268265..464e43d700 100644 --- a/mk/common.mk +++ b/mk/common.mk @@ -1,106 +1,99 @@ # # Copyright 2014-2020 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # # # Some variables to help with silent rules # https://www.gnu.org/software/automake/manual/html_node/Automake-silent_002drules-Option.html -# -# We require a minimum automake version of 1.11, which includes AM_V_GEN and -# AM_V_at, but AM_V_P is not available until 1.13. V ?= $(AM_DEFAULT_VERBOSITY) # When a make command is prefixed with one of the AM_V_* macros, it may also be # desirable to suffix the command with this, to silence stdout. PCMK_quiet = $(pcmk_quiet_$(V)) pcmk_quiet_0 = >/dev/null pcmk_quiet_1 = # AM_V_GEN is intended to be used in custom pattern rules, and replaces echoing # the command used with a more concise line with "GEN" and the name of the file # being generated. Our AM_V_* macros are similar but more descriptive.
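# For illustration (AM_V_FOO is hypothetical, not defined in this file), each
# pair below works by indexing on $(V): "make V=0" selects the _0 variant,
# whose "@" prefix suppresses command echoing while printing a short tag, and
# "make V=1" selects the empty _1 variant, so the full command is echoed:
#
#   AM_V_FOO = $(am__v_FOO_$(V))
#   am__v_FOO_0 = @echo "  FOO      $@";
#   am__v_FOO_1 =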
AM_V_MAN = $(am__v_MAN_$(V)) am__v_MAN_0 = @echo " MAN $@"; am__v_MAN_1 = AM_V_SCHEMA = $(am__v_SCHEMA_$(V)) am__v_SCHEMA_0 = @echo " SCHEMA $@"; am__v_SCHEMA_1 = -AM_V_PUB = $(am__v_PUB_$(V)) -am__v_PUB_0 = @echo " PUB $@: $(DOCBOOK_FORMATS)"; -am__v_PUB_1 = - AM_V_BOOK = $(am__v_BOOK_$(V)) am__v_BOOK_0 = @echo " BOOK $(@:%/_build=%): $(BOOK_FORMATS)"; am__v_BOOK_1 = MAINTAINERCLEANFILES = Makefile.in AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \ -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl # # Man page builders # # We have three types of man pages: # - man pages for the tools # - man pages for OCF agents # - man pages for cluster properties used by daemons # # "BUILD_HELP" actually means "help2man is available", so it only controls the # tool man pages, which are generated by help2man. The other man pages are # generated via XSL transforms. # if BUILD_HELP man8_MANS = $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8) HELP2MAN_ARGS = -N --section 8 --name "Part of the Pacemaker cluster resource manager" # Some of our tools' help output is just a shell script invocation of another # tool's help. Putting the real tool in MAN8DEPS helps detect when the wrapped help # needs updating. # # If a ".inc" file exists, the tool has been converted to use glib for # argument parsing, otherwise it still uses the libcrmcommon functions. # # @TODO Drop MAN8DEPS once we've converted all tools to libpacemaker API calls # and all wrappers to C code. %.8: % $(MAN8DEPS) $(AM_V_at)chmod a+x $(abs_builddir)/$< $(AM_V_MAN)if [ -f $(top_srcdir)/tools/$@.inc ]; then \ PATH=$(abs_builddir):$$PATH $(HELP2MAN) $(HELP2MAN_ARGS) \ -h --help-all \ --no-discard-stderr \ -i $(top_srcdir)/tools/$@.inc $(abs_builddir)/$< \ | sed -f $(top_srcdir)/tools/fix-manpages > $@ ; \ else \ PATH=$(abs_builddir):$$PATH $(HELP2MAN) $(HELP2MAN_ARGS) \ $(abs_builddir)/$< --output $@ ; \ fi endif # Save raw XML meta-data from daemon executables, for later conversion into man # pages. (Note that more specific rules may override this for creating other # types of XML files.) %.xml: % $(AM_V_at)$(abs_builddir)/$< metadata > $@ # Process the raw daemon and OCF agent meta-data output using our # meta-data-to-docbook-XML transform. %.dbook: %.xml $(AM_V_at)$(XSLTPROC) --nonet --novalid --stringparam man.name $* \ $(DBOOK_OPTS) $(top_srcdir)/xml/ocf-meta2man.xsl \ $(abs_builddir)/$< > $(abs_builddir)/$@ # Generate the actual man page for an OCF resource agent from the intermediate # docbook XML.
%.7: %.dbook $(AM_V_MAN)$(XSLTPROC) $(MANPAGE_XSLT) $(abs_builddir)/$< $(PCMK_quiet) diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in index 45818b9cf7..2753ef8682 100644 --- a/rpm/pacemaker.spec.in +++ b/rpm/pacemaker.spec.in @@ -1,859 +1,859 @@ # User-configurable globals and defines to control package behavior # (these should not test {with X} values, which are declared later) ## User and group to use for nonprivileged services %global uname hacluster %global gname haclient ## Where to install Pacemaker documentation %if 0%{?suse_version} > 0 %global pcmk_docdir %{_docdir}/%{name}-%{version} %else %if 0%{?rhel} > 7 %global pcmk_docdir %{_docdir}/%{name}-doc %else %global pcmk_docdir %{_docdir}/%{name} %endif %endif ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) %global pcmkversion X.Y.Z %global specversion 1 ## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build %global commit HEAD ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be a constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 # Define conditionals so that "rpmbuild --with <feature>" and # "rpmbuild --without <feature>" can enable and disable specific features ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd ## Add option to enable support for storing sensitive information outside CIB %bcond_with cibsecrets ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Add option to create binaries with coverage analysis %bcond_with coverage ## Add option to skip generating documentation ## (the build tools aren't available everywhere) %bcond_without doc ## Add option to prefix package version with "0."
## (so later "official" packages will be considered updates) %bcond_with pre_release ## Add option to ship Upstart job files %bcond_with upstart_job ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening ## Add option to enable links for legacy daemon names %bcond_with legacy_links # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Whether this is a tagged release (final or release candidate) %define tag_release %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo 1 ;; *%{rparen} echo 0 ;; esac) ## Portion of export/dist tarball name after "pacemaker-", and release version %if 0%{tag_release} %define archive_version %(c=%{commit}; echo ${c:10}) %define archive_github_url %{commit}#/%{name}-%{archive_version}.tar.gz %define pcmk_release %(c=%{commit}; case $c in *-rc[[:digit:]]*%{rparen} echo 0.%{specversion}.${c: -3} ;; *%{rparen} echo %{specversion} ;; esac) %else %define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) %define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz %if %{with pre_release} %define pcmk_release 0.%{specversion}.%{archive_version}.git %else %define pcmk_release %{specversion}.%{archive_version}.git %endif %endif ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed, as it's intended to conditionally select some of these, and ## for that there are only a few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## - systemd-aware rpm, as manifested by the presence of a particular ## macro (rpm itself will trivially always be present when building) ## - existence of the /usr/lib/os-release file, which is something heavily ## propagated by the systemd project ## - when those are not good enough, particular distro-specific macros ## can always be checked (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $?
-ne 0; echo $?)) %if 0%{?fedora} > 20 || 0%{?rhel} > 7 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif %if !%{defined _rundir} %if 0%{?fedora} >= 15 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1200 %define _rundir /run %else %define _rundir /var/run %endif %endif %if 0%{?fedora} > 22 || 0%{?rhel} > 7 %global supports_recommends 1 %endif ## Different distros name certain packages differently ## (note: corosync libraries also differ, but all provide corosync-devel) %if 0%{?suse_version} > 0 %global pkgname_bzip2_devel libbz2-devel %global pkgname_docbook_xsl docbook-xsl-stylesheets %global pkgname_gnutls_devel libgnutls-devel %global pkgname_shadow_utils shadow %global pkgname_procps procps %global pkgname_glue_libs libglue %global pkgname_pcmk_libs lib%{name}3 %global hacluster_id 90 %else %global pkgname_libtool_devel libtool-ltdl-devel %global pkgname_libtool_devel_arch libtool-ltdl-devel%{?_isa} %global pkgname_bzip2_devel bzip2-devel %global pkgname_docbook_xsl docbook-style-xsl %global pkgname_gnutls_devel gnutls-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs %global hacluster_id 189 %endif # Python-related definitions ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) ## -- distro-dependent tricks or automake's fallback to be applied there %if %{defined _python_bytecompile_extra} %global _python_bytecompile_extra 0 %else ### the statement effectively means no RPM-native byte-compiling will occur at ### all, so distro-dependent tricks for Python-specific packages to be applied %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif ## Prefer Python 3 definitions explicitly, in case 2 is also available %if %{defined __python3} %global python_name python3 %global python_path %{__python3} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %if %{defined python_version} %global python_name python%(echo %{python_version} | cut -d'.' 
-f1) %define python_path %{?__python}%{!?__python:/usr/bin/%{python_name}} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} %endif %define python_site %{?python_sitelib}%{!?python_sitelib:%( %{python_name} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: https://www.clusterlabs.org/ Group: System Environment/Daemons # Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e # will download pacemaker-e91769e.tar.gz # # The ending part starting with '#' is ignored by github but necessary for # rpmbuild to know what the tar archive name is. (The downloaded file will be # named correctly only for commit IDs, not tagged releases.) # # You can use "spectool -s 0 pacemaker.spec" (rpmdevtools) to show final URL. Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{archive_github_url} Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if !%{defined _unitdir} Requires: %{pkgname_procps} Requires: psmisc %endif %{?systemd_requires} Requires: %{python_path} BuildRequires: %{python_name}-devel # Pacemaker requires a minimum level of libqb functionality Requires: libqb >= 0.13.0 BuildRequires: libqb-devel >= 0.13.0 # Basics required for the build (even if usually satisfied through other BRs) BuildRequires: coreutils findutils grep sed # Required for core functionality BuildRequires: automake autoconf gcc libtool pkgconfig %{?pkgname_libtool_devel} -BuildRequires: pkgconfig(glib-2.0) >= 2.32 +BuildRequires: pkgconfig(glib-2.0) >= 2.42 BuildRequires: libxml2-devel libxslt-devel libuuid-devel BuildRequires: %{pkgname_bzip2_devel} # Enables optional functionality BuildRequires: ncurses-devel %{pkgname_docbook_xsl} BuildRequires: help2man %{pkgname_gnutls_devel} pam-devel pkgconfig(dbus-1) %if %{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync >= 2.0.0 BuildRequires: corosync-devel >= 2.0.0 %if %{with stonithd} BuildRequires: %{pkgname_glue_libs}-devel %endif %if %{with doc} BuildRequires: inkscape asciidoc %{python_name}-sphinx %endif Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} # Bundled bits ## Pacemaker uses the crypto/md5-buffer module from gnulib %if 0%{?fedora} || 0%{?rhel} Provides: bundled(gnulib) %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. It supports clusters of more than 16 nodes, with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, and when related resources fail, and can be configured to periodically check resource health.
Available rpmbuild rebuild options: --with(out) : cibsecrets coverage doc stonithd hardening pre_release profiling upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %if 0%{?supports_recommends} Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report Recommends: tar Recommends: bzip2 %endif Requires: perl-TimeDate Requires: %{pkgname_procps} Requires: psmisc Requires(post): coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{pkgname_pcmk_libs} License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons Requires(pre): %{pkgname_shadow_utils} Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t Conflicts: sbd < 1.4.0 %description -n %{pkgname_pcmk_libs} Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if !%{defined _unitdir} Requires: %{pkgname_procps} %endif # -remote can be fully independent of systemd %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package -n %{pkgname_pcmk_libs}-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: libuuid-devel%{?_isa} %{?pkgname_libtool_devel_arch} Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} Requires: %{pkgname_bzip2_devel}%{?_isa} glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} Requires: corosync-devel >= 2.0.0 %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries for developing tools for Pacemaker.
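As context for the -devel description above, here is a minimal sketch (not part of this patch) of the kind of client those headers and libraries support, using only libcib calls that also appear in tools/crm_resource.c later in this diff; the helper name fetch_live_cib and the client name "example-client" are illustrative, not Pacemaker APIs.

#include <libxml/tree.h>        /* xmlNode */
#include <crm/cib.h>            /* cib_t, cib_new(), cib_query, cib_sync_call */
#include <crm/common/results.h> /* pcmk_ok */

/* Query the live CIB into *output (caller frees the returned XML) */
static int
fetch_live_cib(xmlNode **output)
{
    cib_t *cib = cib_new();
    int rc = cib->cmds->signon(cib, "example-client", cib_query);

    if (rc == pcmk_ok) {
        rc = cib->cmds->query(cib, NULL, output, cib_sync_call);
        cib->cmds->signoff(cib);
    }
    cib_delete(cib);
    return rc; /* legacy return code, convertible with pcmk_legacy2rc() as elsewhere in this diff */
}

Such a tool would be compiled against the pkg-config files that the lib/Makefile.am change above now installs via pkgconfig_DATA (for example, pacemaker-cib.pc).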
%package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: %{pkgname_procps} Requires: psmisc BuildArch: noarch # systemd Python bindings are a separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker Group: Documentation BuildArch: noarch %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas License: GPLv2+ Summary: Schemas and upgrade stylesheets for Pacemaker BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker Pacemaker is an advanced, scalable High-Availability cluster resource manager. %prep %setup -q -n %{name}-%{archive_version} %build export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will # use its own defaults otherwise; if such hardenings are completely # undesired, rpmbuild using "--without hardening" # (or "--define '_without_hardening 1'") export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %endif ./autogen.sh %if 0%{?rhel} >= 7 %global concurrent_fencing --with-concurrent-fencing-default=true %endif %if 0%{?fedora} <= 34 || 0%{?rhel} <= 8 %global compat20 --enable-compat-2.0 %endif %{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ %{?with_legacy_links: --enable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{?with_cibsecrets: --with-cibsecrets} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ %{?concurrent_fencing} \ %{?compat20} \ --with-initdir=%{_initrddir} \ --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif make %{_smp_mflags} V=1 %check make %{_smp_mflags} check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED } 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint [ -f .CHECKED ] && rm -f -- .CHECKED exit $? 
# TODO remove when rpm<4.14 compatibility irrelevant %install # skip automake-native Python byte-compilation, since RPM-native one (possibly # distro-confined to Python-specific directories, which is currently the only # relevant place, anyway) assures proper intrinsic alignment with wider system # (such as with py_byte_compile macro, which is concurrent Fedora/EL specific) make install \ DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf %endif %if %{defined _unitdir} mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif # Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # For now, don't package the servicelog-related binaries built only for # ppc64le when certain dependencies are installed. If they get more exercise by # advanced users, we can reconsider. rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif # Byte-compile Python sources where suitable and the distro procedures known %if %{defined py_byte_compile} %{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests} %if !%{defined _python_bytecompile_extra} %{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts} %endif %endif %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . -name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %post %if %{defined _unitdir} %systemd_post pacemaker.service %else /sbin/chkconfig --add pacemaker || : %endif %preun %if %{defined _unitdir} %systemd_preun pacemaker.service %else /sbin/service pacemaker stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker || : fi %endif %postun %if %{defined _unitdir} %systemd_postun_with_restart pacemaker.service %endif %pre remote %if %{defined _unitdir} # Stop the service before anything is touched, and remember to restart # it as one of the last actions (compared to using systemd_postun_with_restart, # this avoids suicide when sbd is in use) systemctl --quiet is-active pacemaker_remote if [ $? 
-eq 0 ] ; then mkdir -p %{_localstatedir}/lib/rpm-state/%{name} touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote systemctl stop pacemaker_remote >/dev/null 2>&1 else rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post remote %if %{defined _unitdir} %systemd_post pacemaker_remote.service %else /sbin/chkconfig --add pacemaker_remote || : %endif %preun remote %if %{defined _unitdir} %systemd_preun pacemaker_remote.service %else /sbin/service pacemaker_remote stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker_remote || : fi %endif %postun remote %if %{defined _unitdir} # This next line is a no-op, because we stopped the service earlier, but # we leave it here because it allows us to revert to the standard behavior # in the future if desired %systemd_postun_with_restart pacemaker_remote.service # Explicitly take care of removing the flag-file(s) upon final removal if [ "$1" -eq 0 ] ; then rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %posttrans remote %if %{defined _unitdir} if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then systemctl start pacemaker_remote >/dev/null 2>&1 rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post cli %if %{defined _unitdir} %systemd_post crm_mon.service %endif if [ "$1" -eq 2 ]; then # Package upgrade, not initial install: # Move any pre-2.0 logs to new location to ensure they get rotated { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \ || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker } >/dev/null 2>/dev/null || : fi %preun cli %if %{defined _unitdir} %systemd_preun crm_mon.service %endif %postun cli %if %{defined _unitdir} %systemd_postun_with_restart crm_mon.service %endif %pre -n %{pkgname_pcmk_libs} getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id} getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname} exit 0 %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n %{pkgname_pcmk_libs} %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %postun -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %post cluster-libs -p /sbin/ldconfig %postun cluster-libs -p /sbin/ldconfig %endif %files ########################################################### %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %if %{defined _unitdir} %{_unitdir}/pacemaker.service %else %{_initrddir}/pacemaker %endif %exclude %{_libexecdir}/pacemaker/cts-log-watcher %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted %exclude %{_sbindir}/pacemaker_remoted %{_libexecdir}/pacemaker/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/fence_legacy %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_o2cb.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_legacy.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_datadir}/pacemaker/alerts %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine 
/usr/lib/ocf/resource.d/pacemaker/controld /usr/lib/ocf/resource.d/pacemaker/o2cb /usr/lib/ocf/resource.d/pacemaker/remote %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/pacemaker.conf %config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf %endif %files cli %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/crm_mon.conf %endif %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %if %{with cibsecrets} %{_sbindir}/cibsecret %endif %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource %{_sbindir}/crm_rule %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %{_sbindir}/stonith_admin # "dirname" is owned by -schemas, which is a prerequisite %{_datadir}/pacemaker/report.collector %{_datadir}/pacemaker/report.common # XXX "dirname" is not owned by any prerequisite %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude /usr/lib/ocf/resource.d/pacemaker/controld %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb %exclude /usr/lib/ocf/resource.d/pacemaker/remote %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/pacemaker-controld.* %exclude %{_mandir}/man7/pacemaker-schedulerd.* %exclude %{_mandir}/man7/pacemaker-fenced.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_legacy.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles %files -n %{pkgname_pcmk_libs} %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpacemaker.so.* %{_libdir}/libstonithd.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files cluster-libs %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files remote %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} # state directory is shared between the subpackages # let rpm take care of removing it once it isn't # referenced anymore and empty %ghost %dir %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker-remoted %{_sbindir}/pacemaker_remoted %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files doc %doc %{pcmk_docdir} %license licenses/CC-BY-SA-4.0 %files cts %{python_site}/cts %{_datadir}/pacemaker/tests %{_libexecdir}/pacemaker/cts-log-watcher %{_libexecdir}/pacemaker/cts-support %license licenses/GPLv2 %doc COPYING
%doc ChangeLog %files -n %{pkgname_pcmk_libs}-devel %{_includedir}/pacemaker %{_libdir}/*.so %if %{with coverage} %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files schemas %license licenses/GPLv2 %dir %{_datadir}/pacemaker %{_datadir}/pacemaker/*.rng %{_datadir}/pacemaker/*.xsl %{_datadir}/pacemaker/api %{_datadir}/pkgconfig/pacemaker-schemas.pc %changelog * PACKAGE_DATE ClusterLabs PACKAGE_VERSION-1 - See included ChangeLog file for details diff --git a/tools/crm_resource.c b/tools/crm_resource.c index a10e50f4dd..29b0a0413d 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,2097 +1,2097 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources" enum rsc_command { cmd_none = 0, // No command option given (yet) cmd_ban, cmd_cleanup, cmd_clear, cmd_colocations, cmd_colocations_deep, cmd_cts, cmd_delete, cmd_delete_param, cmd_digests, cmd_execute_agent, cmd_fail, cmd_get_param, cmd_get_property, cmd_list_active_ops, cmd_list_agents, cmd_list_all_ops, cmd_list_alternatives, cmd_list_instances, cmd_list_providers, cmd_list_resources, cmd_list_standards, cmd_locate, cmd_metadata, cmd_move, cmd_query_raw_xml, cmd_query_xml, cmd_refresh, cmd_restart, cmd_set_param, cmd_set_property, cmd_wait, cmd_why, }; struct { enum rsc_command rsc_cmd; // crm_resource command to perform // Infrastructure that given command needs to work gboolean require_cib; // Whether command requires CIB IPC int cib_options; // Options to use with CIB IPC calls gboolean require_crmd; // Whether command requires controller IPC gboolean require_dataset; // Whether command requires populated data set gboolean require_resource; // Whether command requires resource specified gboolean require_node; // Whether command requires node specified int find_flags; // Flags to use when searching for resource // Command-line option values gchar *rsc_id; // Value of --resource gchar *rsc_type; // Value of --resource-type gboolean force; // --force was given gboolean clear_expired; // --expired was given gboolean recursive; // --recursive was given gboolean promoted_role_only; // --master was given gchar *host_uname; // Value of --node gchar *interval_spec; // Value of --interval gchar *move_lifetime; // Value of --lifetime gchar *operation; // Value of --operation const char *attr_set_type; // Instance, meta, or utilization attribute gchar *prop_id; // --nvpair (attribute XML ID) char *prop_name; // Attribute name gchar *prop_set; // --set-name (attribute block XML ID) gchar *prop_value; // --parameter-value (attribute value) int timeout_ms; // Parsed from --timeout value char *agent_spec; // Standard and/or provider and/or agent gchar *xml_file; // Value of (deprecated) --xml-file // Resource configuration specified via command-line arguments gboolean cmdline_config; // Resource configuration was via arguments char *v_agent; // Value of --agent char *v_class; // Value of --class char *v_provider; // Value of --provider GHashTable *cmdline_params; // Resource parameters specified // Positional command-line 
arguments gchar **remainder; // Positional arguments as given GHashTable *override_params; // Resource parameter values that override config } options = { .attr_set_type = XML_TAG_ATTR_SETS, .cib_options = cib_sync_call, .require_cib = TRUE, .require_dataset = TRUE, .require_resource = TRUE, }; #if 0 // @COMPAT @TODO enable this at next backward compatibility break #define SET_COMMAND(cmd) do { \ if (options.rsc_cmd != cmd_none) { \ g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, \ "Only one command option may be specified"); \ return FALSE; \ } \ options.rsc_cmd = (cmd); \ } while (0) #else #define SET_COMMAND(cmd) do { options.rsc_cmd = (cmd); } while (0) #endif gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean digests_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); static crm_exit_t exit_code = CRM_EX_OK; static pcmk__output_t *out = NULL; // Things that should be cleaned up on exit static GError *error = NULL; static GMainLoop *mainloop = NULL; static cib_t *cib_conn = NULL; static pcmk_ipc_api_t *controld_api = NULL; static pe_working_set_t *data_set = NULL; #define MESSAGE_TIMEOUT_S 60 #define INDENT " " static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, 
PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; // Clean up and exit static crm_exit_t bye(crm_exit_t ec) { pcmk__output_and_clear_error(error, out); if (out != NULL) { out->finish(out, ec, true, NULL); pcmk__output_free(out); } if (cib_conn != NULL) { cib_t *save_cib_conn = cib_conn; cib_conn = NULL; // Ensure we can't free this twice save_cib_conn->cmds->signoff(save_cib_conn); cib_delete(save_cib_conn); } if (controld_api != NULL) { pcmk_ipc_api_t *save_controld_api = controld_api; controld_api = NULL; // Ensure we can't free this twice pcmk_free_ipc_api(save_controld_api); } if (mainloop != NULL) { g_main_loop_unref(mainloop); mainloop = NULL; } pe_free_working_set(data_set); data_set = NULL; crm_exit(ec); return ec; } static void quit_main_loop(crm_exit_t ec) { exit_code = ec; if (mainloop != NULL) { GMainLoop *mloop = mainloop; mainloop = NULL; // Don't re-enter this block pcmk_quit_main_loop(mloop, 10); g_main_loop_unref(mloop); } } static gboolean resource_ipc_timeout(gpointer data) { // Start with newline because "Waiting for ..." message doesn't have one if (error != NULL) { g_clear_error(&error); } g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT, "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S); quit_main_loop(CRM_EX_TIMEOUT); return FALSE; } static void controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { switch (event_type) { case pcmk_ipc_event_disconnect: if (exit_code == CRM_EX_DISCONNECT) { // Unexpected crm_info("Connection to controller was terminated"); } quit_main_loop(exit_code); break; case pcmk_ipc_event_reply: if (status != CRM_EX_OK) { out->err(out, "Error: bad reply from controller: %s", crm_exit_str(status)); pcmk_disconnect_ipc(api); quit_main_loop(status); } else { if ((pcmk_controld_api_replies_expected(api) == 0) && mainloop && g_main_loop_is_running(mainloop)) { out->info(out, "... got reply (done)"); crm_debug("Got all the replies we expected"); pcmk_disconnect_ipc(api); quit_main_loop(CRM_EX_OK); } else { out->info(out, "... 
got reply"); } } break; default: break; } } static void start_mainloop(pcmk_ipc_api_t *capi) { unsigned int count = pcmk_controld_api_replies_expected(capi); if (count > 0) { out->info(out, "Waiting for %d %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects mainloop = g_main_loop_new(NULL, FALSE); g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } } static int compare_id(gconstpointer a, gconstpointer b) { return strcmp((const char *)a, (const char *)b); } static GListPtr build_constraint_list(xmlNode *root) { GListPtr retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObjectPtr xpathObj = NULL; int ndx = 0; cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root); xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION); for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) { xmlNode *match = getXpathResult(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id); } freeXpathObject(xpathObj); return retval; } /* short option letters still available: eEJkKXyYZ */ static GOptionEntry query_entries[] = { { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List all cluster resources with status", NULL }, { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List IDs of all instantiated resources (individual members\n" INDENT "rather than groups etc.)", NULL }, { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, NULL, NULL }, { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List active resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List all resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_standards_cb, "List supported standards", NULL }, { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_providers_cb, "List all available OCF providers", NULL }, - { "list-agents", 0, 0, G_OPTION_ARG_CALLBACK, + { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, list_agents_cb, "List all agents available for the named standard and/or provider", "STD/PROV" }, - { "list-ocf-alternatives", 0, 0, G_OPTION_ARG_CALLBACK, + { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, list_alternatives_cb, "List all available providers for the named OCF agent", "AGENT" }, - { "show-metadata", 0, 0, G_OPTION_ARG_CALLBACK, + { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, metadata_cb, "Show the metadata for the named class:provider:agent", "SPEC" }, { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show XML configuration of resource (after any template expansion)", NULL }, { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show XML configuration of resource (before any template expansion)", NULL }, - { "get-parameter", 'g', 0, G_OPTION_ARG_CALLBACK, get_param_prop_cb, + { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb, "Display named parameter for resource (use instance attribute\n" INDENT "unless --meta or --utilization is specified)", "PARAM" }, { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, 
get_param_prop_cb, "Display named property of resource ('class', 'type', or 'provider') " "(requires --resource)", "PROPERTY" }, { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show node(s) currently running resource", NULL }, { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Display the (co)location constraints that apply to a resource\n" INDENT "and the resources it is colocated with", NULL }, { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Display the (co)location constraints that apply to a resource", NULL }, { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb, "Show why resources are not running, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { NULL } }; static GOptionEntry command_entries[] = { { "validate", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "Validate resource configuration by calling agent's validate-all\n" INDENT "action. The configuration may be specified either by giving an\n" INDENT "existing resource name with -r, or by specifying --class,\n" INDENT "--agent, and --provider arguments, along with any number of\n" INDENT "--option arguments.", NULL }, { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, "If resource has any past failures, clear its history and fail\n" INDENT "count. Optionally filtered by --resource, --node, --operation\n" INDENT "and --interval (otherwise all). --operation and --interval\n" INDENT "apply to fail counts, but entire history is always cleared, to\n" INDENT "allow current state to be rechecked. If the named resource is\n" INDENT "part of a group, or one numbered instance of a clone or bundled\n" INDENT "resource, the clean-up applies to the whole collective resource\n" INDENT "unless --force is given.", NULL }, { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, "Delete resource's history (including failures) so its current state\n" INDENT "is rechecked. Optionally filtered by --resource and --node\n" INDENT "(otherwise all). If the named resource is part of a group, or one\n" INDENT "numbered instance of a clone or bundled resource, the refresh\n" INDENT "applies to the whole collective resource unless --force is given.", NULL }, - { "set-parameter", 'p', 0, G_OPTION_ARG_CALLBACK, set_delete_param_cb, + { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb, "Set named parameter for resource (requires -v). Use instance\n" INDENT "attribute unless --meta or --utilization is specified.", "PARAM" }, - { "delete-parameter", 'd', 0, G_OPTION_ARG_CALLBACK, set_delete_param_cb, + { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb, "Delete named parameter for resource. Use instance attribute\n" INDENT "unless --meta or --utilization is specified.", "PARAM" }, { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb, "Set named property of resource ('class', 'type', or 'provider') " "(requires -r, -t, -v)", "PROPERTY" }, { NULL } }; static GOptionEntry location_entries[] = { { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Create a constraint to move resource. If --node is specified,\n" INDENT "the constraint will be to move to that node, otherwise it\n" INDENT "will be to ban the current node. Unless --force is specified\n" INDENT "this will return an error if the resource is already running\n" INDENT "on the specified node.
If --force is specified, this will\n" INDENT "always ban the current node.\n" INDENT "Optional: --lifetime, --master. NOTE: This may prevent the\n" INDENT "resource from running on its previous location until the\n" INDENT "implicit constraint expires or is removed with --clear.", NULL }, { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Create a constraint to keep resource off a node.\n" INDENT "Optional: --node, --lifetime, --master.\n" INDENT "NOTE: This will prevent the resource from running on the\n" INDENT "affected node until the implicit constraint expires or is\n" INDENT "removed with --clear. If --node is not specified, it defaults\n" INDENT "to the node currently running the resource for primitives\n" INDENT "and groups, or the master for promotable clones with\n" INDENT "promoted-max=1 (all other situations result in an error as\n" INDENT "there is no sane default).", NULL }, { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Remove all constraints created by the --ban and/or --move\n" INDENT "commands. Requires: --resource. Optional: --node, --master,\n" INDENT "--expired. If --node is not specified, all constraints created\n" INDENT "by --ban and --move will be removed for the named resource. If\n" INDENT "--node and --force are specified, any constraint created by\n" INDENT "--move will be cleared, even if it is not for the specified\n" INDENT "node. If --expired is specified, only those constraints whose\n" INDENT "lifetimes have expired will be removed.", NULL }, { "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb, "Modifies the --clear argument to remove constraints with\n" INDENT "expired lifetimes.", NULL }, - { "lifetime", 'u', 0, G_OPTION_ARG_STRING, &options.move_lifetime, + { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime, "Lifespan (as ISO 8601 duration) of created constraints (with\n" INDENT "-B, -M; see https://en.wikipedia.org/wiki/ISO_8601#Durations)", "TIMESPEC" }, - { "master", 0, 0, G_OPTION_ARG_NONE, &options.promoted_role_only, + { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Limit scope of command to Master role (with -B, -M, -U). For\n" INDENT "-B and -M the previous master may remain active in the Slave role.", NULL }, { NULL } }; static GOptionEntry advanced_entries[] = { { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb, "(Advanced) Delete a resource from the CIB. Required: -t", NULL }, { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb, "(Advanced) Tell the cluster this resource has failed", NULL }, { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb, "(Advanced) Tell the cluster to restart this resource and\n" INDENT "anything that depends on it", NULL }, { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb, "(Advanced) Wait until the cluster settles into a stable state", NULL }, { "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, digests_cb, "(Advanced) Show parameter hashes that Pacemaker uses to detect\n" INDENT "configuration changes (only accurate if there is resource\n" INDENT "history on the specified node).
Required: --resource, --node.\n" INDENT "Optional: any NAME=VALUE parameters will be used to override\n" INDENT "the configuration (to see what the hash would be with those\n" INDENT "changes).", NULL }, { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and demote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and stop a resource on the local node", NULL }, { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and start a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and promote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-check", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and check the state of a resource on\n" INDENT "the local node", NULL }, { NULL } }; static GOptionEntry addl_entries[] = { - { "node", 'N', 0, G_OPTION_ARG_STRING, &options.host_uname, + { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname, "Node name", "NAME" }, - { "recursive", 0, 0, G_OPTION_ARG_NONE, &options.recursive, + { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive, "Follow colocation chains when using --set-parameter", NULL }, - { "resource-type", 't', 0, G_OPTION_ARG_STRING, &options.rsc_type, + { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type, "Resource XML element (primitive, group, etc.) 
(with -D)", "ELEMENT" }, - { "parameter-value", 'v', 0, G_OPTION_ARG_STRING, &options.prop_value, + { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value, "Value to use with -p", "PARAM" }, { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource meta-attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource utilization attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, - { "operation", 'n', 0, G_OPTION_ARG_STRING, &options.operation, + { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation, "Operation to clear instead of all (with -C -r)", "OPERATION" }, - { "interval", 'I', 0, G_OPTION_ARG_STRING, &options.interval_spec, + { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec, "Interval of operation to clear (default 0) (with -C -r -n)", "N" }, - { "class", 0, 0, G_OPTION_ARG_CALLBACK, class_cb, + { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb, "The standard the resource agent conforms to (for example, ocf).\n" INDENT "Use with --agent, --provider, --option, and --validate.", "CLASS" }, - { "agent", 0, 0, G_OPTION_ARG_CALLBACK, agent_provider_cb, + { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, "The agent to use (for example, IPaddr). Use with --class,\n" INDENT "--provider, --option, and --validate.", "AGENT" }, - { "provider", 0, 0, G_OPTION_ARG_CALLBACK, agent_provider_cb, + { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, "The vendor that supplies the resource agent (for example,\n" INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", "PROVIDER" }, - { "option", 0, 0, G_OPTION_ARG_CALLBACK, option_cb, + { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, "Specify a device configuration parameter as NAME=VALUE (may be\n" INDENT "specified multiple times). Use with --validate and without the\n" INDENT "-r option.", "PARAM" }, - { "set-name", 's', 0, G_OPTION_ARG_STRING, &options.prop_set, + { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set, "(Advanced) XML ID of attributes element to use (with -p, -d)", "ID" }, - { "nvpair", 'i', 0, G_OPTION_ARG_STRING, &options.prop_id, + { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id, "(Advanced) XML ID of nvpair element to use (with -p, -d)", "ID" }, - { "timeout", 'T', 0, G_OPTION_ARG_CALLBACK, timeout_cb, + { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb, "(Advanced) Abort if command does not finish in this time (with\n" INDENT "--restart, --wait, --force-*)", "N" }, - { "force", 'f', 0, G_OPTION_ARG_NONE, &options.force, + { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force, "If making CIB changes, do so regardless of quorum. 
See help for\n" INDENT "individual commands for additional behavior.", NULL }, { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file, NULL, "FILE" }, { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname, NULL, "HOST" }, { NULL } }; gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.cmdline_config = TRUE; options.require_resource = FALSE; if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) { if (options.v_provider) { free(options.v_provider); } options.v_provider = strdup(optarg); } else { if (options.v_agent) { free(options.v_agent); } options.v_agent = strdup(optarg); } return TRUE; } gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) { options.attr_set_type = XML_TAG_META_SETS; } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) { options.attr_set_type = XML_TAG_UTILIZATION; } return TRUE; } gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) { if (!out->is_quiet(out)) { g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, "Standard %s does not support parameters\n", optarg); } return FALSE; } else { if (options.v_class != NULL) { free(options.v_class); } options.v_class = strdup(optarg); } options.cmdline_config = TRUE; options.require_resource = FALSE; return TRUE; } gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) { SET_COMMAND(cmd_cleanup); } else { SET_COMMAND(cmd_refresh); } options.require_resource = FALSE; if (getenv("CIB_file") == NULL) { options.require_crmd = TRUE; } options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_dataset = FALSE; SET_COMMAND(cmd_delete); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.clear_expired = TRUE; options.require_resource = FALSE; return TRUE; } static void get_agent_spec(const gchar *optarg) { options.require_cib = FALSE; options.require_dataset = FALSE; options.require_resource = FALSE; if (options.agent_spec != NULL) { free(options.agent_spec); options.agent_spec = NULL; } if (optarg != NULL) { options.agent_spec = strdup(optarg); } } gboolean list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_agents); get_agent_spec(optarg); return TRUE; } gboolean list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_providers); get_agent_spec(optarg); return TRUE; } gboolean list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_standards); options.require_cib = FALSE; options.require_dataset = FALSE; options.require_resource = FALSE; return TRUE; } gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_alternatives); get_agent_spec(optarg); return TRUE; } gboolean metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { 
SET_COMMAND(cmd_metadata); get_agent_spec(optarg); return TRUE; } gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { char *name = NULL; char *value = NULL; if (pcmk_scan_nvpair(optarg, &name, &value) != 2) { return FALSE; } if (options.cmdline_params == NULL) { options.cmdline_params = crm_str_table_new(); } g_hash_table_replace(options.cmdline_params, name, value); return TRUE; } gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_crmd = TRUE; options.require_node = TRUE; SET_COMMAND(cmd_fail); return TRUE; } gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_clear); } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_ban); } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_move); } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) { options.find_flags = pe_find_renamed|pe_find_any; SET_COMMAND(cmd_query_xml); } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) { options.find_flags = pe_find_renamed|pe_find_any; SET_COMMAND(cmd_query_raw_xml); } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_locate); } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_colocations_deep); } else { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_colocations); } return TRUE; } gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) { SET_COMMAND(cmd_get_param); } else { SET_COMMAND(cmd_get_property); } if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) { SET_COMMAND(cmd_cts); } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) { SET_COMMAND(cmd_list_resources); } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) { SET_COMMAND(cmd_list_instances); } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) { SET_COMMAND(cmd_list_active_ops); } else { SET_COMMAND(cmd_list_all_ops); } options.require_resource = FALSE; return TRUE; } gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) { SET_COMMAND(cmd_set_param); } else { SET_COMMAND(cmd_delete_param); } if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_dataset = FALSE; if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); SET_COMMAND(cmd_set_property); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } 
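The option tables and callbacks above all follow GLib's GOptionEntry pattern: each callback matches the GOptionArgFunc signature, records its effect in the options struct, and returns TRUE on success. A minimal, self-contained sketch of that pattern follows; the "demo" option and callback names are illustrative, not part of crm_resource.

#include <glib.h>

/* Same signature as the crm_resource callbacks above (GOptionArgFunc);
 * data is NULL when entries are added with g_option_context_add_main_entries()
 */
static gboolean
demo_cb(const gchar *option_name, const gchar *optarg, gpointer data,
        GError **error)
{
    g_print("parsed %s = %s\n", option_name, (optarg? optarg : "(flag)"));
    return TRUE;
}

static GOptionEntry demo_entries[] = {
    { "demo", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, demo_cb,
      "Illustrative option taking a value", "VALUE" },
    { NULL }
};

int
main(int argc, char **argv)
{
    GError *err = NULL;
    GOptionContext *context = g_option_context_new(NULL);

    g_option_context_add_main_entries(context, demo_entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &err)) {
        g_printerr("%s\n", err->message);
        g_clear_error(&err);
    }
    g_option_context_free(context);
    return 0;
}

This is also why the hunks above replace bare 0 flag fields with G_OPTION_FLAG_NONE: the value is identical, but the named constant makes the entry tables self-documenting.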
gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.timeout_ms = crm_get_msec(optarg); return TRUE; } gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_execute_agent); if (options.operation) { g_free(options.operation); } options.operation = g_strdup(option_name + 2); // skip "--" options.find_flags = pe_find_renamed|pe_find_anon; if (options.override_params == NULL) { options.override_params = crm_str_table_new(); } return TRUE; } gboolean restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_restart); options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } gboolean digests_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_digests); options.find_flags = pe_find_renamed|pe_find_anon; if (options.override_params == NULL) { options.override_params = crm_str_table_new(); } options.require_node = TRUE; options.require_dataset = TRUE; return TRUE; } gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_wait); options.require_resource = FALSE; options.require_dataset = FALSE; return TRUE; } gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_resource = FALSE; SET_COMMAND(cmd_why); options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } static int ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; pe_node_t *current = NULL; unsigned int nactive = 0; CRM_CHECK(rsc != NULL, return EINVAL); current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { int count = 0; GListPtr iter = NULL; current = NULL; for(iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = (pe_resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { count++; current = pe__current_node(child); } } if(count == 1 && current) { rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } else { rc = EINVAL; *exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Resource '%s' not moved: active in %d locations (promoted in %d).\n" "To prevent '%s' from running on a specific location, " "specify a node.\n" "To prevent '%s' from being promoted at a specific " "location, specify a node and the master option.", options.rsc_id, nactive, count, options.rsc_id, options.rsc_id); } } else { rc = EINVAL; *exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Resource '%s' not moved: active in %d locations.\n" "To prevent '%s' from running on a specific location, " "specify a node.", options.rsc_id, nactive, options.rsc_id); } return rc; } static void cleanup(pcmk__output_t *out, pe_resource_t *rsc) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname?
options.host_uname: "all nodes")); rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation, options.interval_spec, TRUE, data_set, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, cib_conn, rsc); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } static int clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) { GListPtr before = NULL; GListPtr after = NULL; GListPtr remaining = NULL; GListPtr ele = NULL; pe_node_t *dest = NULL; int rc = pcmk_rc_ok; if (!out->is_quiet(out)) { before = build_constraint_list(data_set->input); } if (options.clear_expired) { rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options, options.rsc_id, options.host_uname, options.promoted_role_only); } else if (options.host_uname) { dest = pe_find_node(data_set->nodes, options.host_uname); if (dest == NULL) { rc = pcmk_rc_node_unknown; if (!out->is_quiet(out)) { g_list_free(before); } return rc; } rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL, cib_conn, options.cib_options, TRUE, options.force); } else { rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes, cib_conn, options.cib_options, TRUE, options.force); } if (!out->is_quiet(out)) { rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Could not get modified CIB: %s\n", pcmk_strerror(rc)); g_list_free(before); return rc; } data_set->input = *cib_xml_copy; cluster_status(data_set); after = build_constraint_list(data_set->input); remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); for (ele = remaining; ele != NULL; ele = ele->next) { out->info(out, "Removing constraint: %s", (char *) ele->data); } g_list_free(before); g_list_free(after); g_list_free(remaining); } return rc; } static int delete(void) { int rc = pcmk_rc_ok; xmlNode *msg_data = NULL; if (options.rsc_type == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "You need to specify a resource type with -t"); return rc; } msg_data = create_xml_node(NULL, options.rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, options.cib_options); rc = pcmk_legacy2rc(rc); free_xml(msg_data); return rc; } static int list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; char *provider = strchr(agent_spec, ':'); lrmd_t *lrmd_conn = lrmd_api_new(); lrmd_list_t *list = NULL; if (provider) { *provider++ = 0; } rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider); if (rc > 0) { rc = out->message(out, "agents-list", list, agent_spec, provider); } else { rc = pcmk_rc_error; } if (rc != pcmk_rc_ok) { *exit_code = CRM_EX_NOSUCH; if (provider == NULL) { g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No agents found for standard '%s'", agent_spec); } else { g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No agents found for standard '%s' and provider '%s'", agent_spec, provider); } } lrmd_api_delete(lrmd_conn); return rc; } static int list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc; const char *text = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); lrmd_list_t *list = NULL; switch (options.rsc_cmd) { case cmd_list_alternatives: rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, 
agent_spec, &list); if (rc > 0) { rc = out->message(out, "alternatives-list", list, agent_spec); } else { rc = pcmk_rc_error; } text = "OCF providers"; break; case cmd_list_standards: rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); if (rc > 0) { rc = out->message(out, "standards-list", list); } else { rc = pcmk_rc_error; } text = "standards"; break; case cmd_list_providers: rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); if (rc > 0) { rc = out->message(out, "providers-list", list, agent_spec); } else { rc = pcmk_rc_error; } text = "OCF providers"; break; default: *exit_code = CRM_EX_SOFTWARE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Bug"); lrmd_api_delete(lrmd_conn); return pcmk_rc_error; } if (rc != pcmk_rc_ok) { if (agent_spec != NULL) { *exit_code = CRM_EX_NOSUCH; rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No %s found for %s", text, agent_spec); } else { *exit_code = CRM_EX_NOSUCH; rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No %s found", text); } } lrmd_api_delete(lrmd_conn); return rc; } static int populate_working_set(xmlNodePtr *cib_xml_copy) { int rc = pcmk_rc_ok; if (options.xml_file != NULL) { *cib_xml_copy = filename2xml(options.xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); } if(rc != pcmk_rc_ok) { return rc; } /* Populate the working set instance */ data_set = pe_new_working_set(); if (data_set == NULL) { rc = ENOMEM; return rc; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); rc = update_working_set_xml(data_set, cib_xml_copy); if (rc == pcmk_rc_ok) { cluster_status(data_set); } return rc; } static int refresh(pcmk__output_t *out) { int rc = pcmk_rc_ok; const char *router_node = options.host_uname; int attr_options = pcmk__node_attr_none; if (options.host_uname) { pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname); if (pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "No cluster connection to Pacemaker Remote node %s detected", options.host_uname); return rc; } router_node = node->details->uname; attr_options |= pcmk__node_attr_remote; } } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", options.host_uname? options.host_uname : "all nodes"); rc = pcmk_rc_ok; return rc; } crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes"); rc = pcmk__node_attr_request_clear(NULL, options.host_uname, NULL, NULL, NULL, NULL, attr_options); if (pcmk_controld_api_reprobe(controld_api, options.host_uname, router_node) == pcmk_rc_ok) { start_mainloop(controld_api); } return rc; } static void refresh_resource(pcmk__output_t *out, pe_resource_t *rsc) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? 
options.host_uname: "all nodes")); rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL, 0, FALSE, data_set, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, cib_conn, rsc); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } static int set_property(void) { int rc = pcmk_rc_ok; xmlNode *msg_data = NULL; if (pcmk__str_empty(options.rsc_type)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must specify -t with resource type"); rc = ENXIO; return rc; } else if (pcmk__str_empty(options.prop_value)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must supply -v with new value"); rc = EINVAL; return rc; } CRM_LOG_ASSERT(options.prop_name != NULL); msg_data = create_xml_node(NULL, options.rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id); crm_xml_add(msg_data, options.prop_name, options.prop_value); rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, options.cib_options); rc = pcmk_legacy2rc(rc); free_xml(msg_data); return rc; } static int show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type, &metadata, 0); rc = pcmk_legacy2rc(rc); if (metadata) { out->output_xml(out, "metadata", metadata); } else { *exit_code = crm_errno2exit(rc); g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Metadata query for %s failed: %s", agent_spec, pcmk_rc_str(rc)); } } else { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "'%s' is not a valid agent specification", agent_spec); } lrmd_api_delete(lrmd_conn); return rc; } static void validate_cmdline_config(void) { // Cannot use both --resource and command-line resource configuration if (options.rsc_id != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--resource cannot be used with --class, --agent, and --provider"); // Not all commands support command-line resource configuration } else if (options.rsc_cmd != cmd_execute_agent) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--class, --agent, and --provider can only be used with " "--validate"); // Not all of --class, --agent, and --provider need to be given. Not all // classes support the concept of a provider. Check that what we were given // is valid. } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) { if (options.v_provider != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "stonith does not support providers"); } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s is not a known stonith agent", options.v_agent ? options.v_agent : ""); } } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s:%s:%s is not a known resource", options.v_class ? options.v_class : "", options.v_provider ? options.v_provider : "", options.v_agent ? 
options.v_agent : ""); } if (error != NULL) { return; } if (options.cmdline_params == NULL) { options.cmdline_params = crm_str_table_new(); } options.require_resource = FALSE; options.require_dataset = FALSE; options.require_cib = FALSE; } static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; GOptionEntry extra_prog_entries[] = { - { "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet), + { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet), "Be less descriptive in output.", NULL }, - { "resource", 'r', 0, G_OPTION_ARG_STRING, &options.rsc_id, + { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id, "Resource ID", "ID" }, - { G_OPTION_REMAINING, 0, 0, G_OPTION_ARG_STRING_ARRAY, &options.remainder, + { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder, NULL, NULL }, { NULL } }; const char *description = "Examples:\n\n" "List the available OCF agents:\n\n" "\t# crm_resource --list-agents ocf\n\n" "List the available OCF agents from the linux-ha project:\n\n" "\t# crm_resource --list-agents ocf:heartbeat\n\n" "Move 'myResource' to a specific node:\n\n" "\t# crm_resource --resource myResource --move --node altNode\n\n" "Allow (but not force) 'myResource' to move back to its original " "location:\n\n" "\t# crm_resource --resource myResource --clear\n\n" "Stop 'myResource' (and anything that depends on it):\n\n" "\t# crm_resource --resource myResource --set-parameter target-role " "--meta --parameter-value Stopped\n\n" "Tell the cluster not to manage 'myResource' (the cluster will not " "attempt to start or stop the\n" "resource under any circumstances; useful when performing maintenance " "tasks on a resource):\n\n" "\t# crm_resource --resource myResource --set-parameter is-managed " "--meta --parameter-value false\n\n" "Erase the operation history of 'myResource' on 'aNode' (the cluster " "will 'forget' the existing\n" "resource state, including any errors, and attempt to recover the" "resource; useful when a resource\n" "had failed permanently and has been repaired by an administrator):\n\n" "\t# crm_resource --resource myResource --cleanup --node aNode\n\n"; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); g_option_context_set_description(context, description); /* Add the -Q option, which cannot be part of the globally supported options * because some tools use that flag for something else. 
*/ pcmk__add_main_args(context, extra_prog_entries); pcmk__add_arg_group(context, "queries", "Queries:", "Show query help", query_entries); pcmk__add_arg_group(context, "commands", "Commands:", "Show command help", command_entries); pcmk__add_arg_group(context, "locations", "Locations:", "Show location help", location_entries); pcmk__add_arg_group(context, "advanced", "Advanced:", "Show advanced option help", advanced_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { xmlNode *cib_xml_copy = NULL; pe_resource_t *rsc = NULL; pe_node_t *node = NULL; int rc = pcmk_rc_ok; /* * Parse command line arguments */ GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv"); GOptionContext *context = build_arg_context(args, &output_group); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } pcmk__cli_init_logging("crm_resource", args->verbosity); rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } pe__register_messages(out); crm_resource_register_messages(out); lrmd__register_messages(out); pcmk__register_lib_messages(out); out->quiet = args->quiet; crm_log_args(argc, argv); /* * Validate option combinations */ // If the user didn't explicitly specify a command, list resources if (options.rsc_cmd == cmd_none) { options.rsc_cmd = cmd_list_resources; options.require_resource = FALSE; } // --expired without --clear/-U doesn't make sense if (options.clear_expired && (options.rsc_cmd != cmd_clear)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--expired requires --clear or -U"); goto done; } if ((options.remainder != NULL) && (options.override_params != NULL)) { // Commands that use positional arguments will create override_params for (gchar **s = options.remainder; *s; s++) { char *name = calloc(1, strlen(*s) + 1); /* +1 gives sscanf() room for the terminator even when '=' is missing */ char *value = calloc(1, strlen(*s) + 1); int rc = sscanf(*s, "%[^=]=%s", name, value); if (rc == 2) { g_hash_table_replace(options.override_params, name, value); } else { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Error parsing '%s' as a name=value pair", *s); free(value); free(name); goto done; } } } else if (options.remainder != NULL) { gchar **strv = NULL; gchar *msg = NULL; int i = 1; int len = 0; for (gchar **s = options.remainder; *s; s++) { len++; } CRM_ASSERT(len > 0); /* Allow for the header string below plus the NULL terminator that g_strjoinv() expects */ strv = calloc(len + 2, sizeof(char *)); strv[0] = strdup("non-option ARGV-elements:"); for (gchar **s = options.remainder; *s; s++) { strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s); i++; } msg = g_strjoinv("", strv); g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s", msg); g_free(msg); for(i = 0; i < len + 1; i++) { free(strv[i]); } free(strv); goto done; } if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) { /* Kind of a hack to display XML lists using a real tag instead of <list>.
This just * saves from having to write custom messages to build the lists around all these things */ switch (options.rsc_cmd) { case cmd_list_resources: case cmd_query_xml: case cmd_query_raw_xml: case cmd_list_active_ops: case cmd_list_all_ops: case cmd_colocations: case cmd_colocations_deep: pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname()); break; default: pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); break; } } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep || options.rsc_cmd == cmd_list_resources) { pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname()); } } if (args->version) { out->version(out, false); goto done; } if (options.cmdline_config) { /* A resource configuration was given on the command line. Sanity-check * the values and set error if they don't make sense. */ validate_cmdline_config(); if (error != NULL) { exit_code = CRM_EX_USAGE; goto done; } } else if (options.cmdline_params != NULL) { // @COMPAT @TODO error out here when we can break backward compatibility g_hash_table_destroy(options.cmdline_params); options.cmdline_params = NULL; } if (options.require_resource && (options.rsc_id == NULL)) { rc = ENXIO; exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply a resource id with -r"); goto done; } if (options.require_node && (options.host_uname == NULL)) { rc = ENXIO; exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply a node name with -N"); goto done; } /* * Set up necessary connections */ if (options.force) { crm_debug("Forcing..."); cib__set_call_options(options.cib_options, crm_system_name, cib_quorum_override); } if (options.find_flags && options.rsc_id) { options.require_dataset = TRUE; } // Establish a connection to the CIB if needed if (options.require_cib) { cib_conn = cib_new(); if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) { rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_DISCONNECT, "Could not create CIB connection"); goto done; } rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Could not connect to the CIB: %s", pcmk_rc_str(rc)); goto done; } } /* Populate working set from XML file if specified or CIB query otherwise */ if (options.require_dataset) { rc = populate_working_set(&cib_xml_copy); if (rc != pcmk_rc_ok) { goto done; } } // If command requires that resource exist if specified, find it if (options.find_flags && options.rsc_id) { rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id, options.find_flags); if (rsc == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "Resource '%s' not found", options.rsc_id); goto done; } } // If user supplied a node name, check whether it exists if ((options.host_uname != NULL) && (data_set != NULL)) { node = pe_find_node(data_set->nodes, options.host_uname); } // Establish a connection to the controller if needed if (options.require_crmd) { rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Error connecting to the controller: %s", pcmk_rc_str(rc)); goto done; } pcmk_register_ipc_callback(controld_api, controller_event_callback, NULL); rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); if (rc != pcmk_rc_ok) { 
g_set_error(&error, PCMK__RC_ERROR, rc, "Error connecting to the controller: %s", pcmk_rc_str(rc)); goto done; } } /* * Handle requested command */ switch (options.rsc_cmd) { case cmd_list_resources: { GListPtr all = NULL; all = g_list_prepend(all, strdup("*")); rc = out->message(out, "resource-list", data_set, pe_print_rsconly | pe_print_pending, FALSE, TRUE, FALSE, TRUE, all, all, FALSE); g_list_free_full(all, free); if (rc == pcmk_rc_no_output) { rc = ENXIO; } break; } case cmd_list_instances: rc = out->message(out, "resource-names-list", data_set->resources); if (rc != pcmk_rc_ok) { rc = ENXIO; } break; case cmd_list_standards: case cmd_list_providers: case cmd_list_alternatives: rc = list_providers(out, options.agent_spec, &exit_code); break; case cmd_list_agents: rc = list_agents(out, options.agent_spec, &exit_code); break; case cmd_metadata: rc = show_metadata(out, options.agent_spec, &exit_code); break; case cmd_restart: /* We don't pass data_set because rsc needs to stay valid for the * entire lifetime of cli_resource_restart(), but it will reset and * update the working set multiple times, so it needs to use its own * copy. */ rc = cli_resource_restart(out, rsc, options.host_uname, options.move_lifetime, options.timeout_ms, cib_conn, options.cib_options, options.promoted_role_only, options.force); break; case cmd_wait: rc = wait_till_stable(out, options.timeout_ms, cib_conn); break; case cmd_execute_agent: if (options.cmdline_config) { exit_code = cli_resource_execute_from_params(out, "test", options.v_class, options.v_provider, options.v_agent, "validate-all", options.cmdline_params, options.override_params, options.timeout_ms, args->verbosity, options.force); } else { exit_code = cli_resource_execute(out, rsc, options.rsc_id, options.operation, options.override_params, options.timeout_ms, cib_conn, data_set, args->verbosity, options.force); } break; case cmd_digests: node = pe_find_node(data_set->nodes, options.host_uname); if (node == NULL) { rc = pcmk_rc_node_unknown; } else { rc = pcmk__resource_digests(out, rsc, node, options.override_params, data_set); } break; case cmd_colocations: rc = out->message(out, "stacks-constraints", rsc, data_set, false); break; case cmd_colocations_deep: rc = out->message(out, "stacks-constraints", rsc, data_set, true); break; case cmd_cts: rc = pcmk_rc_ok; for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { rsc = (pe_resource_t *) lpc->data; cli_resource_print_cts(out, rsc); } cli_resource_print_cts_constraints(out, data_set); break; case cmd_fail: rc = cli_resource_fail(out, controld_api, options.host_uname, options.rsc_id, data_set); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } break; case cmd_list_active_ops: rc = cli_resource_print_operations(out, options.rsc_id, options.host_uname, TRUE, data_set); break; case cmd_list_all_ops: rc = cli_resource_print_operations(out, options.rsc_id, options.host_uname, FALSE, data_set); break; case cmd_locate: { GListPtr nodes = cli_resource_search(out, rsc, options.rsc_id, data_set); rc = out->message(out, "resource-search-list", nodes, options.rsc_id); g_list_free_full(nodes, free); break; } case cmd_query_xml: rc = cli_resource_print(out, rsc, data_set, TRUE); break; case cmd_query_raw_xml: rc = cli_resource_print(out, rsc, data_set, FALSE); break; case cmd_why: if ((options.host_uname != NULL) && (node == NULL)) { rc = pcmk_rc_node_unknown; } else { rc = out->message(out, "resource-reasons-list", cib_conn, data_set->resources, rsc, node); } break; case cmd_clear: rc = 
clear_constraints(out, &cib_xml_copy); break; case cmd_move: if (options.host_uname == NULL) { rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); } else { rc = cli_resource_move(out, rsc, options.rsc_id, options.host_uname, options.move_lifetime, cib_conn, options.cib_options, data_set, options.promoted_role_only, options.force); } break; case cmd_ban: if (options.host_uname == NULL) { rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); } else if (node == NULL) { rc = pcmk_rc_node_unknown; } else { rc = cli_resource_ban(out, options.rsc_id, node->details->uname, options.move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } break; case cmd_get_property: rc = out->message(out, "property-list", rsc, options.prop_name); if (rc == pcmk_rc_no_output) { rc = ENXIO; } break; case cmd_set_property: rc = set_property(); break; case cmd_get_param: { unsigned int count = 0; GHashTable *params = NULL; pe_node_t *current = pe__find_active_on(rsc, &count, NULL); bool free_params = true; if (count > 1) { out->err(out, "%s is active on more than one node," " returning the default value for %s", rsc->id, crm_str(options.prop_name)); current = NULL; } crm_debug("Looking up %s in %s", options.prop_name, rsc->id); if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { params = pe_rsc_params(rsc, current, data_set); free_params = false; } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { params = crm_str_table_new(); get_meta_attributes(params, rsc, current, data_set); } else { params = crm_str_table_new(); pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params, NULL, FALSE, data_set); } rc = out->message(out, "attribute-list", rsc, options.prop_name, params); if (free_params) { g_hash_table_destroy(params); } break; } case cmd_set_param: if (pcmk__str_empty(options.prop_value)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "You need to supply a value with the -v option"); rc = EINVAL; goto done; } /* coverity[var_deref_model] False positive */ rc = cli_resource_update_attribute(out, rsc, options.rsc_id, options.prop_set, options.attr_set_type, options.prop_id, options.prop_name, options.prop_value, options.recursive, cib_conn, options.cib_options, data_set, options.force); break; case cmd_delete_param: /* coverity[var_deref_model] False positive */ rc = cli_resource_delete_attribute(out, rsc, options.rsc_id, options.prop_set, options.attr_set_type, options.prop_id, options.prop_name, cib_conn, options.cib_options, data_set, options.force); break; case cmd_cleanup: if (rsc == NULL) { rc = cli_cleanup_all(out, controld_api, options.host_uname, options.operation, options.interval_spec, data_set); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } else { cleanup(out, rsc); } break; case cmd_refresh: if (rsc == NULL) { rc = refresh(out); } else { refresh_resource(out, rsc); } break; case cmd_delete: rc = delete(); break; default: g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_SOFTWARE, "Unimplemented command: %d", (int) options.rsc_cmd); break; } /* * Clean up and exit */ done: /* Don't do any of this for pcmk_rc_no_output (doesn't make sense to show an * error message for no output) or for CRM_EX_USAGE (we don't want to show * an "error: OK" message from pcmk_rc_str). 
*/ if ((rc != pcmk_rc_ok && rc != pcmk_rc_no_output) || (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE)) { if (rc == pcmk_rc_no_quorum) { g_prefix_error(&error, "To ignore quorum, use the force option.\n"); } if (error != NULL) { char *msg = crm_strdup_printf("%s\nError performing operation: %s", error->message, pcmk_rc_str(rc)); g_clear_error(&error); g_set_error(&error, PCMK__RC_ERROR, rc, "%s", msg); free(msg); } else { g_set_error(&error, PCMK__RC_ERROR, rc, "Error performing operation: %s", pcmk_rc_str(rc)); } } if (exit_code == CRM_EX_OK) { exit_code = pcmk_rc2exitc(rc); } g_free(options.host_uname); g_free(options.interval_spec); g_free(options.move_lifetime); g_free(options.operation); g_free(options.prop_id); free(options.prop_name); g_free(options.prop_set); g_free(options.prop_value); g_free(options.rsc_id); g_free(options.rsc_type); free(options.agent_spec); free(options.v_agent); free(options.v_class); free(options.v_provider); g_free(options.xml_file); g_strfreev(options.remainder); if (options.override_params != NULL) { g_hash_table_destroy(options.override_params); } /* options.cmdline_params does not need to be destroyed here. See the * comments in cli_resource_execute_from_params. */ g_strfreev(processed_args); g_option_context_free(context); return bye(exit_code); } diff --git a/tools/crm_verify.c b/tools/crm_verify.c index c0e3c6c822..1a9c5fc3f8 100644 --- a/tools/crm_verify.c +++ b/tools/crm_verify.c @@ -1,301 +1,301 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include const char *SUMMARY = "Check a Pacemaker configuration for errors\n\n" "Check the well-formedness of a complete Pacemaker XML configuration,\n" "its conformance to the configured schema, and the presence of common\n" "misconfigurations. Problems reported as errors must be fixed before the\n" "cluster will work properly. 
It is left to the administrator to decide\n" "whether to fix problems reported as warnings."; struct { char *cib_save; gboolean use_live_cib; char *xml_file; gboolean xml_stdin; char *xml_string; } options; extern gboolean stage0(pe_working_set_t * data_set); static GOptionEntry data_entries[] = { { "live-check", 'L', 0, G_OPTION_ARG_NONE, &options.use_live_cib, "Check the configuration used by the running cluster", NULL }, { "xml-file", 'x', 0, G_OPTION_ARG_FILENAME, &options.xml_file, "Check the configuration in the named file", "FILE" }, { "xml-pipe", 'p', 0, G_OPTION_ARG_NONE, &options.xml_stdin, "Check the configuration piped in via stdin", NULL }, { "xml-text", 'X', 0, G_OPTION_ARG_STRING, &options.xml_string, "Check the configuration in the supplied string", "XML" }, { NULL } }; static GOptionEntry addl_entries[] = { - { "save-xml", 'S', 0, G_OPTION_ARG_FILENAME, + { "save-xml", 'S', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME, &options.cib_save, "Save verified XML to named file (most useful with -L)", "FILE" }, { NULL } }; static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; const char *description = "Examples:\n\n" "Check the consistency of the configuration in the running cluster:\n\n" "\tcrm_verify --live-check\n\n" "Check the consistency of the configuration in a given file and " "produce verbose output:\n\n" "\tcrm_verify --xml-file file.xml --verbose\n\n"; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); g_option_context_set_description(context, description); pcmk__add_arg_group(context, "data", "Data sources:", "Show data options", data_entries); pcmk__add_arg_group(context, "additional", "Additional options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { xmlNode *cib_object = NULL; xmlNode *status = NULL; pe_working_set_t *data_set = NULL; cib_t *cib_conn = NULL; const char *xml_tag = NULL; int rc = pcmk_rc_ok; crm_exit_t exit_code = CRM_EX_OK; GError *error = NULL; pcmk__output_t *out = NULL; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, "xSX"); GOptionContext *context = build_arg_context(args, &output_group); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } pcmk__cli_init_logging("crm_verify", args->verbosity); rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } if (args->version) { out->version(out, false); goto done; } crm_info("=#=#=#=#= Getting XML =#=#=#=#="); if (options.use_live_cib) { cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); } if (options.use_live_cib) { if (rc == pcmk_rc_ok) { int options = cib_scope_local | cib_sync_call; crm_info("Reading XML from: live cluster"); rc = cib_conn->cmds->query(cib_conn, NULL, &cib_object, options); rc = pcmk_legacy2rc(rc); } if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Live CIB query failed: %s", pcmk_rc_str(rc)); goto done; } if
(cib_object == NULL) { rc = ENOMSG; g_set_error(&error, PCMK__RC_ERROR, rc, "Live CIB query failed: empty result"); goto done; } } else if (options.xml_file != NULL) { cib_object = filename2xml(options.xml_file); if (cib_object == NULL) { rc = ENODATA; g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input file: %s", options.xml_file); goto done; } } else if (options.xml_string != NULL) { cib_object = string2xml(options.xml_string); if (cib_object == NULL) { rc = ENODATA; g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input string: %s", options.xml_string); goto done; } } else if (options.xml_stdin) { cib_object = stdin2xml(); if (cib_object == NULL) { rc = ENODATA; g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input from STDIN."); goto done; } } else { rc = ENODATA; g_set_error(&error, PCMK__RC_ERROR, rc, "No configuration source specified. Use --help for usage information."); goto done; } xml_tag = crm_element_name(cib_object); if (!pcmk__str_eq(xml_tag, XML_TAG_CIB, pcmk__str_casei)) { rc = EBADMSG; g_set_error(&error, PCMK__RC_ERROR, rc, "This tool can only check complete configurations (i.e. those starting with <cib>)."); goto done; } if (options.cib_save != NULL) { write_xml_file(cib_object, options.cib_save, FALSE); } status = get_object_root(XML_CIB_TAG_STATUS, cib_object); if (status == NULL) { create_xml_node(cib_object, XML_CIB_TAG_STATUS); } if (validate_xml(cib_object, NULL, FALSE) == FALSE) { pcmk__config_err("CIB did not pass schema validation"); free_xml(cib_object); cib_object = NULL; } else if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) { pcmk__config_error = true; free_xml(cib_object); cib_object = NULL; out->err(out, "The cluster will NOT be able to use this configuration.\n" "Please manually update the configuration to conform to the %s syntax.", xml_latest_schema()); } data_set = pe_new_working_set(); if (data_set == NULL) { rc = errno; crm_perror(LOG_CRIT, "Unable to allocate working set"); goto done; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); if (cib_object == NULL) { } else if (status != NULL || options.use_live_cib) { /* live queries will always have a status section and can do a full simulation */ pcmk__schedule_actions(data_set, cib_object, NULL); } else { data_set->now = crm_time_new(NULL); data_set->input = cib_object; stage0(data_set); } pe_free_working_set(data_set); if (pcmk__config_error) { rc = pcmk_rc_schema_validation; if (args->verbosity > 0) { g_set_error(&error, PCMK__RC_ERROR, rc, "Errors found during check: config not valid"); } else { g_set_error(&error, PCMK__RC_ERROR, rc, "Errors found during check: config not valid\n-V may provide more details"); } } else if (pcmk__config_warning) { rc = pcmk_rc_schema_validation; if (args->verbosity > 0) { g_set_error(&error, PCMK__RC_ERROR, rc, "Warnings found during check: config may not be valid"); } else { g_set_error(&error, PCMK__RC_ERROR, rc, "Warnings found during check: config may not be valid\n-V may provide more details"); } } if (options.use_live_cib && cib_conn) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } done: g_strfreev(processed_args); pcmk__free_arg_context(context); free(options.cib_save); free(options.xml_file); free(options.xml_string); if (exit_code == CRM_EX_OK) { exit_code = pcmk_rc2exitc(rc); } pcmk__output_and_clear_error(error, NULL); if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } crm_exit(exit_code); }
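As a self-contained illustration of crm_verify's root-element check (the crm_element_name()/XML_TAG_CIB test above), here is a plain-libxml2 sketch; the input name cib.xml is a placeholder, and none of Pacemaker's XML wrappers are used:

#include <stdio.h>
#include <libxml/parser.h>
#include <libxml/tree.h>
#include <libxml/xmlstring.h>

/* Parse a file and confirm the document root is <cib>, roughly what
 * crm_verify does before running schema validation. */
int
main(void)
{
    int rc = 0;
    xmlDocPtr doc = xmlReadFile("cib.xml", NULL, 0);
    xmlNodePtr root = NULL;

    if (doc == NULL) {
        fprintf(stderr, "Couldn't parse input file: cib.xml\n");
        return 1;
    }
    root = xmlDocGetRootElement(doc);
    /* crm_verify compares case-insensitively, hence xmlStrcasecmp() */
    if ((root == NULL)
        || (xmlStrcasecmp(root->name, (const xmlChar *) "cib") != 0)) {
        fprintf(stderr, "Not a complete configuration: root is not <cib>\n");
        rc = 1;
    }
    xmlFreeDoc(doc);
    xmlCleanupParser();
    return rc;
}

Build with, e.g., gcc demo.c $(pkg-config --cflags --libs libxml-2.0). Checking the root element up front is what lets crm_verify reject fragments (a lone <resources> section, say) before attempting validation against the full CIB schema.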