diff --git a/.gitignore b/.gitignore
index 72d496d999..0b02601646 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,351 +1,351 @@
 #
 # Copyright 2011-2023 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 # Common conventions for files that should be ignored
 *~
 *.bz2
 *.diff
 *.orig
 *.patch
 *.rej
 *.sed
 *.swp
 *.tar.gz
 *.tgz
 \#*
 .\#*
 logs
 
 # libtool artifacts
 *.la
 *.lo
 .libs
 libltdl
 libtool
 libtool.m4
 ltdl.m4
 /m4/argz.m4
 /m4/ltargz.m4
 /m4/ltoptions.m4
 /m4/ltsugar.m4
 /m4/ltversion.m4
 /m4/lt~obsolete.m4
 
 # autotools artifacts
 .deps
 .dirstamp
 Makefile
 Makefile.in
 aclocal.m4
 autoconf
 autoheader
 autom4te.cache/
 automake
 /confdefs.h
 config.log
 config.status
 configure
 /conftest*
 
 # gettext artifacts
 /ABOUT-NLS
 /m4/codeset.m4
 /m4/fcntl-o.m4
 /m4/gettext.m4
 /m4/glibc2.m4
 /m4/glibc21.m4
 /m4/iconv.m4
 /m4/intdiv0.m4
 /m4/intl.m4
 /m4/intldir.m4
 /m4/intlmacosx.m4
 /m4/intmax.m4
 /m4/inttypes-pri.m4
 /m4/inttypes_h.m4
 /m4/lcmessage.m4
 /m4/lib-ld.m4
 /m4/lib-link.m4
 /m4/lib-prefix.m4
 /m4/lock.m4
 /m4/longlong.m4
 /m4/nls.m4
 /m4/po.m4
 /m4/printf-posix.m4
 /m4/progtest.m4
 /m4/size_max.m4
 /m4/stdint_h.m4
 /m4/threadlib.m4
 /m4/uintmax_t.m4
 /m4/visibility.m4
 /m4/wchar_t.m4
 /m4/wint_t.m4
 /m4/xsize.m4
 /po/*.gmo
 /po/*.header
 /po/*.pot
 /po/*.sin
 /po/Makefile.in.in
 /po/Makevars.template
 /po/POTFILES
 /po/Rules-quot
 /po/stamp-po
 
 # configure targets
 /agents/ocf/ClusterMon
 /agents/ocf/Dummy
 /agents/ocf/HealthCPU
 /agents/ocf/HealthIOWait
 /agents/ocf/HealthSMART
 /agents/ocf/Stateful
 /agents/ocf/SysInfo
 /agents/ocf/attribute
 /agents/ocf/controld
 /agents/ocf/ifspeed
 /agents/ocf/o2cb
 /agents/ocf/ping
 /agents/ocf/remote
 /agents/stonith/fence_legacy
 /agents/stonith/fence_watchdog
 /cts/benchmark/clubench
 /cts/cts-attrd
 /cts/cts-cli
 /cts/cts-exec
 /cts/cts-fencing
 /cts/cts-regression
 /cts/cts-scheduler
 /cts/lab/CTS.py
 /cts/lab/CTSlab.py
 /cts/lab/OCFIPraTest.py
 /cts/lab/cluster_test
 /cts/lab/cts
 /cts/lab/cts-log-watcher
-/cts/lxc_autogen.sh
 /cts/support/LSBDummy
 /cts/support/cts-support
 /cts/support/fence_dummy
 /cts/support/pacemaker-cts-dummyd
 /cts/support/pacemaker-cts-dummyd@.service
 /daemons/execd/pacemaker_remote
 /daemons/execd/pacemaker_remote.service
 /daemons/fenced/fence_legacy
 /daemons/fenced/fence_watchdog
 /daemons/pacemakerd/pacemaker.combined.upstart
 /daemons/pacemakerd/pacemaker.service
 /daemons/pacemakerd/pacemaker.upstart
 /doc/Doxyfile
 /etc/init.d/pacemaker
 /etc/logrotate.d/pacemaker
 /etc/sysconfig/pacemaker
 /include/config.h
 /include/config.h.in
 /include/crm_config.h
 /maint/bumplibs
 /python/pacemaker/buildoptions.py
 /python/setup.py
 /tools/cluster-clean
 /tools/cluster-helper
 /tools/cluster-init
 /tools/cibsecret
 /tools/crm_error
 /tools/crm_failcount
 /tools/crm_master
 /tools/crm_mon.service
 /tools/crm_mon.upstart
 /tools/crm_report
 /tools/crm_rule
 /tools/crm_standby
 /tools/pcmk_simtimes
 /tools/report.collector
 /tools/report.common
 
 # Compiled targets and intermediate files
 *.o
 *.pc
 *.pyc
 /daemons/attrd/pacemaker-attrd
 /daemons/based/pacemaker-based
 /daemons/controld/pacemaker-controld
 /daemons/execd/cts-exec-helper
 /daemons/execd/pacemaker-execd
 /daemons/execd/pacemaker-remoted
 /daemons/fenced/cts-fence-helper
 /daemons/fenced/pacemaker-fenced
 /daemons/pacemakerd/pacemakerd
 /daemons/schedulerd/pacemaker-schedulerd
 /devel/scratch
 /lib/gnu/stdalign.h
 /tools/attrd_updater
 /tools/cibadmin
 /tools/crmadmin
 /tools/crm_attribute
 /tools/crm_diff
 /tools/crm_mon
 /tools/crm_node
 /tools/crm_resource
 /tools/crm_shadow
 /tools/crm_simulate
 /tools/crm_ticket
 /tools/crm_verify
 /tools/iso8601
 /tools/stonith_admin
 
 # Generated XML schema files
 /xml/crm_mon.rng
 /xml/pacemaker*.rng
 /xml/versions.rng
 /xml/api/api-result*.rng
 
 # Working directories for make dist and make export
 /pacemaker-[a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9]
 
 # Documentation build targets and intermediate files
 *.7
 *.7.xml
 *.7.html
 *.8
 *.8.xml
 *.8.html
 GPATH
 GRTAGS
 GTAGS
 TAGS
 /daemons/fenced/pacemaker-fenced.xml
 /daemons/schedulerd/pacemaker-schedulerd.xml
 /doc/.ABI-build
 /doc/HTML
 /doc/abi_dumps
 /doc/abi-check
 /doc/api/
 /doc/compat_reports
 /doc/crm_fencing.html
 /doc/sphinx/*/_build
 /doc/sphinx/*/conf.py
 /doc/sphinx/*/generated
 /doc/sphinx/build-2.1.txt
 /doc/sphinx/shared/images/*.png
 
 # Test artifacts (from unit tests, regression tests, static analysis, etc.)
 *.coverity
 *.gcda
 *.gcno
 coverity-*
 pacemaker_*.info
 /coverage
 /cppcheck.out
 /cts/scheduler/*.ref
 /cts/scheduler/*.up
 /cts/scheduler/*.up.err
 /cts/scheduler/bug-rh-1097457.log
 /cts/scheduler/bug-rh-1097457.trs
 /cts/scheduler/shadow.*
 /cts/test-suite.log
 /lib/*/tests/*/*.log
 /lib/*/tests/*/*_test
 /lib/*/tests/*/*.trs
 /xml/test-*/*.up
 /xml/test-*/*.up.err
 /xml/assets/*.rng
 /xml/assets/diffview.js
 /xml/assets/xmlcatalog
 /test/_test_file.c
 
 # Packaging artifacts
 *.rpm
 /pacemaker.spec
 /rpm/[A-LN-Z]*
 /rpm/build.counter
 /rpm/mock
 
 # Project maintainer artifacts
 /maint/gnulib
 /maint/mocked/based
 /maint/testcc_helper.cc
 /maint/testcc_*_h
 
 # Formerly built files (helps when jumping back and forth in checkout)
 /.ABI-build
 /Doxyfile
 /HTML
 /abi_dumps
 /abi-check
 /build.counter
 /compat_reports
 /compile
 /cts/.regression.failed.diff
 /attrd
 /cib
 /config.guess
 /config.sub
 /coverage.sh
 /crmd
 /cts/CTS.py
 /cts/CTSlab.py
 /cts/CTSvars.py
 /cts/HBDummy
 /cts/LSBDummy
 /cts/OCFIPraTest.py
 /cts/cluster_test
 /cts/cts
 /cts/cts-coverage
 /cts/cts-log-watcher
 /cts/cts-support
 /cts/fence_dummy
 /cts/lab/CTSvars.py
+/cts/lxc_autogen.sh
 /cts/pacemaker-cts-dummyd
 /cts/pacemaker-cts-dummyd@.service
 /daemons/based/cibmon
 /daemons/fenced/fence_legacy
 /daemons/fenced/fence_watchdog
 /daemons/pacemakerd/pacemaker
 /depcomp
 /doc/*.build
 /doc/*/en-US/Ap-*.xml
 /doc/*/en-US/Ch-*.xml
 /doc/*/publican.cfg
 /doc/*/publish
 /doc/*/tmp/**
 /doc/Clusters_from_Scratch.txt
 /doc/Pacemaker_Explained.txt
 /doc/acls.html
 /doc/publican-catalog*
 /doc/shared/en-US/*.xml
 /doc/shared/en-US/images/pcmk-*.png
 /doc/shared/en-US/images/Policy-Engine-*.png
 /extra/*/*
 /fencing
 /include/stamp-*
 /install-sh
 /lib/common/md5.c
 /lib/common/tests/flags/pcmk__clear_flags_as
 /lib/common/tests/flags/pcmk__set_flags_as
 /lib/common/tests/flags/pcmk_all_flags_set
 /lib/common/tests/flags/pcmk_any_flags_set
 /lib/common/tests/operations/parse_op_key
 /lib/common/tests/strings/pcmk__btoa
 /lib/common/tests/strings/pcmk__parse_ll_range
 /lib/common/tests/strings/pcmk__scan_double
 /lib/common/tests/strings/pcmk__str_any_of
 /lib/common/tests/strings/pcmk__strcmp
 /lib/common/tests/strings/pcmk__char_in_any_str
 /lib/common/tests/utils/pcmk_str_is_infinity
 /lib/common/tests/utils/pcmk_str_is_minus_infinity
 /lib/gnu/libgnu.a
 /lib/pengine/tests/rules/pe_cron_range_satisfied
 /lrmd
 /ltmain.sh
 /mcp
 /missing
 /mock
 /pacemaker-*.spec
 /pengine
 /py-compile
 /scratch
 /test-driver
 /xml/crm.dtd
 ylwrap
diff --git a/INSTALL.md b/INSTALL.md
index 78f031435c..1845e42d20 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -1,87 +1,83 @@
 # How to Install Pacemaker
 
 ## Build Dependencies
 
 | Version         | Fedora-based       | SUSE-based         | Debian-based   |
 |:---------------:|:------------------:|:------------------:|:--------------:|
 | 1.13 or later   | automake           | automake           | automake       |
 | 2.64 or later   | autoconf           | autoconf           | autoconf       |
 |                 | libtool            | libtool            | libtool        |
 |                 | libtool-ltdl-devel |                    | libltdl-dev    |
 |                 | libuuid-devel      | libuuid-devel      | uuid-dev       |
 | 0.27 or later   | pkgconfig          | pkgconfig          | pkg-config     |
 | 2.42.0 or later | glib2-devel        | glib2-devel        | libglib2.0-dev |
 |                 | libxml2-devel      | libxml2-devel      | libxml2-dev    |
 |                 | libxslt-devel      | libxslt-devel      | libxslt-dev    |
 |                 | bzip2-devel        | libbz2-devel       | libbz2-dev     |
 | 0.17.0 or later | libqb-devel        | libqb-devel        | libqb-dev      |
 | 3.4 or later    | python3            | python3            | python3        |
 | 0.18 or later   | gettext-devel      | gettext-tools      | gettext        |
 | 0.18 or later   |                    |                    | autopoint      |
 
 Also:
 * make must be GNU (or compatible); setting MAKE=gmake might also work but is
   untested
 * GNU (or compatible) getopt must be somewhere on the PATH
 
 ### Cluster Stack Dependencies
 
 *Only corosync is currently supported*
 
 | Version         | Fedora-based       | SUSE-based         | Debian-based   |
 |:---------------:|:------------------:|:------------------:|:--------------:|
 | 2.0.0 or later  | corosynclib        | libcorosync        | corosync       |
 | 2.0.0 or later  | corosynclib-devel  | libcorosync-devel  |                |
 |                 |                    |                    | libcfg-dev     |
 |                 |                    |                    | libcpg-dev     |
 |                 |                    |                    | libcmap-dev    |
 |                 |                    |                    | libquorum-dev  |
 
 ### Optional Build Dependencies
 
 | Feature Enabled                                 | Version        | Fedora-based            | SUSE-based              | Debian-based            |
 |:-----------------------------------------------:|:--------------:|:-----------------------:|:-----------------------:|:-----------------------:|
 | Pacemaker Remote and encrypted remote CIB admin | 2.12.0 or later| gnutls-devel            | libgnutls-devel         | libgnutls-dev           |
 | encrypted remote CIB admin                      |                | pam-devel               | pam-devel               | libpam0g-dev            |
 | interactive crm_mon                             |                | ncurses-devel           | ncurses-devel           | ncurses-dev             |
 | systemd support                                 |                | systemd-devel           | systemd-devel           | libsystemd-dev          |
 | systemd/upstart resource support                |                | dbus-devel              | dbus-devel              | libdbus-1-dev           |
 | Linux-HA style fencing agents                   |                | cluster-glue-libs-devel | libglue-devel           | cluster-glue-dev        |
 | documentation                                   |                | asciidoc or asciidoctor | asciidoc or asciidoctor | asciidoc or asciidoctor |
 | documentation                                   |                | help2man                | help2man                | help2man                |
 | documentation                                   |                | inkscape                | inkscape                | inkscape                |
 | documentation                                   |                | docbook-style-xsl       | docbook-xsl-stylesheets | docbook-xsl             |
 | documentation                                   |                | python3-sphinx          | python3-sphinx          | python3-sphinx          |
 | documentation (PDF)                             |                | latexmk texlive texlive-capt-of texlive-collection-xetex texlive-fncychap texlive-framed texlive-multirow texlive-needspace texlive-tabulary texlive-titlesec texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xetex | texlive texlive-latex  | texlive texlive-latex-extra |
 | annotated source code as HTML via "make global" |                | global                  | global                  | global                  |
 | RPM packages via "make rpm"                     | 4.11 or later  | rpm                     | rpm                     | (n/a)                   |
 | unit tests                                      | 1.1.0 or later | libcmocka-devel         | libcmocka-devel         | libcmocka-dev           |
 
 ## Optional Testing Dependencies
 * procps and psmisc (if running cts-exec, cts-fencing, or CTS)
 * valgrind (if running CTS valgrind tests)
 * python3-systemd (if using CTS on cluster nodes running systemd)
-* rsync (if running CTS container tests)
-* libvirt-daemon-driver-lxc (if running CTS container tests)
-* libvirt-daemon-lxc (if running CTS container tests)
-* libvirt-login-shell (if running CTS container tests)
 * nmap (if not specifying an IP address base)
 * oprofile (if running CTS profiling tests)
 * dlm (to log DLM debugging info after CTS tests)
 * xmllint (to validate tool output in cts-cli)
 
 ## Simple Install
 
     $ make && sudo make install
 
 If GNU make is not your default make, use "gmake" instead.
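 
 For example:
 
     $ gmake && sudo gmake install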
 
 ## Detailed Install
 
 First, browse the build options that are available:
 
     $ ./autogen.sh
     $ ./configure --help
 
 Re-run ./configure with any options you want, then proceed with the simple
 method.
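 
 For example (the options shown are illustrative):
 
     $ ./configure --prefix=/usr --with-logdir=/var/log/pacemaker
     $ make && sudo make install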
diff --git a/configure.ac b/configure.ac
index ee005f0cff..0fbd49702f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,2155 +1,2154 @@
 dnl
 dnl autoconf for Pacemaker
 dnl
 dnl Copyright 2009-2023 the Pacemaker project contributors
 dnl
 dnl The version control history for this file may have further details.
 dnl
 dnl This source code is licensed under the GNU General Public License version 2
 dnl or later (GPLv2+) WITHOUT ANY WARRANTY.
 
 dnl ===============================================
 dnl Bootstrap
 dnl ===============================================
 AC_PREREQ(2.64)
 
 dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08).
 dnl Once we can require that version, we can simplify this, and no longer
 dnl need ACLOCAL_AMFLAGS in Makefile.am.
 m4_ifdef([AC_CONFIG_MACRO_DIRS],
          [AC_CONFIG_MACRO_DIRS([m4])],
          [AC_CONFIG_MACRO_DIR([m4])])
 
 AC_DEFUN([AC_DATAROOTDIR_CHECKED])
 
 dnl Suggested structure:
 dnl     information on the package
 dnl     checks for programs
 dnl     checks for libraries
 dnl     checks for header files
 dnl     checks for types
 dnl     checks for structures
 dnl     checks for compiler characteristics
 dnl     checks for library functions
 dnl     checks for system services
 
 m4_include([m4/version.m4])
 AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker],
         PCMK_URL)
 
 PCMK_FEATURES=""
 
 LT_CONFIG_LTDL_DIR([libltdl])
 AC_CONFIG_AUX_DIR([libltdl/config])
 AC_CANONICAL_HOST
 
 dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go
 dnl
 dnl Internal header: include/config.h
 dnl   - Contains ALL defines
 dnl   - include/config.h.in is generated automatically by autoheader
 dnl   - NOT to be included in any header files except crm_internal.h
 dnl     (which is also not to be included in any other header files)
 dnl
 dnl External header: include/crm_config.h
 dnl   - Contains a subset of defines checked here
 dnl   - Manually edit include/crm_config.h.in to have configure include
 dnl     new defines
 dnl   - Should not include HAVE_* defines
 dnl   - Safe to include anywhere
 AC_CONFIG_HEADERS([include/config.h include/crm_config.h])
 
 dnl 1.13:           minimum automake version required
 dnl foreign:        don't require GNU-standard top-level files
 dnl tar-ustar:      use (older) POSIX variant of generated tar rather than v7
 dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+)
 AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects])
 
 dnl Require minimum version of pkg-config
 PKG_PROG_PKG_CONFIG(0.27)
 AS_IF([test x"${PKG_CONFIG}" != x""], [],
       [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.27 or later)])])
 PKG_INSTALLDIR
 PKG_NOARCH_INSTALLDIR
 
 dnl Example 2.4. Silent Custom Rule to Generate a File
 dnl %-bar.pc: %.pc
 dnl	$(AM_V_GEN)$(LN_S) $(notdir $^) $@
 
 CC_IN_CONFIGURE=yes
 export CC_IN_CONFIGURE
 
 LDD=ldd
 
 dnl ========================================================================
 dnl Compiler characteristics
 dnl ========================================================================
 
 dnl A particular compiler can be forced by setting the CC environment variable
 AC_PROG_CC
 
 dnl Use at least C99 if possible (automatic for autoconf >= 2.70)
 m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC])
 
 dnl C++ is not needed for build, just maintainer utilities
 AC_PROG_CXX
 
 dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs:
 dnl "The macro gl_EARLY must be called as soon as possible after verifying that
 dnl the C compiler is working. ... The core part of the gnulib checks are done
 dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL
 dnl as a dependency.
 gl_EARLY
 gl_SET_CRYPTO_CHECK_DEFAULT([no])
 gl_INIT
 
 # --enable-new-dtags: Use RUNPATH instead of RPATH.
 # It is necessary to have this done before libtool does linker detection.
 # See also: https://github.com/kronosnet/kronosnet/issues/107
 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags],
                   [AM_LDFLAGS=-Wl,--enable-new-dtags],
                   [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])])
 AC_SUBST([AM_LDFLAGS])
 
 saved_LDFLAGS="$LDFLAGS"
 LDFLAGS="$AM_LDFLAGS $LDFLAGS"
 LT_INIT([dlopen])
 LDFLAGS="$saved_LDFLAGS"
 LTDL_INIT([convenience])
 
 AC_TYPE_SIZE_T
 AC_CHECK_SIZEOF(char)
 AC_CHECK_SIZEOF(short)
 AC_CHECK_SIZEOF(int)
 AC_CHECK_SIZEOF(long)
 AC_CHECK_SIZEOF(long long)
 
 dnl ===============================================
 dnl Helpers
 dnl ===============================================
 cc_supports_flag() {
     local CFLAGS="-Werror $@"
     AC_MSG_CHECKING([whether $CC supports $@])
     AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
                       [RC=0; AC_MSG_RESULT([yes])],
                       [RC=1; AC_MSG_RESULT([no])])
     return $RC
 }
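 # For example, the fatal-warnings check further below calls:
 #     cc_supports_flag -Werror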
 
 # Some tests need to use their own CFLAGS
 
 cc_temp_flags() {
     ac_save_CFLAGS="$CFLAGS"
     CFLAGS="$*"
 }
 
 cc_restore_flags() {
     CFLAGS=$ac_save_CFLAGS
 }
 
 # expand_path_option $path_variable_name $default
 expand_path_option() {
     # The first argument is the variable *name* (not value)
     ac_path_varname="$1"
 
     # Get the original value of the variable
     ac_path_value=$(eval echo "\${${ac_path_varname}}")
 
     # Expand any literal variable expressions in the value so that we don't
     # end up with something like '${prefix}' in #defines etc.
     #
     # Autoconf deliberately leaves values unexpanded to allow overriding
     # the configure script choices in make commands (for example,
     # "make exec_prefix=/foo install"). No longer being able to do this seems
     # like no great loss.
     eval ac_path_value=$(eval echo "${ac_path_value}")
 
     # Use (expanded) default if necessary
     AS_IF([test x"${ac_path_value}" = x""],
 	  [eval ac_path_value=$(eval echo "$2")])
 
     # Require a full path
     AS_CASE(["$ac_path_value"],
             [/*], [eval ${ac_path_varname}="$ac_path_value"],
             [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])]
     )
 }
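 # For example (mirroring a call further below), this expands a default whose
 # value may still contain unexpanded autoconf variables:
 #     expand_path_option docdir "${datadir}/doc/${PACKAGE}-${VERSION}"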
 
 # yes_no_try $user_response $default
 DISABLED=0
 REQUIRED=1
 OPTIONAL=2
 yes_no_try() {
     local value
     AS_IF([test x"$1" = x""], [value="$2"], [value="$1"])
     AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"],
             [0|no|false|disable], [return $DISABLED],
             [1|yes|true|enable], [return $REQUIRED],
             [try|check], [return $OPTIONAL]
     )
     AC_MSG_ERROR([Invalid option value "$value"])
 }
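 # Typical usage (as with the --enable-* options below):
 #     yes_no_try "$enable_systemd" "try"
 #     enable_systemd=$?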
 
 check_systemdsystemunitdir() {
     AC_MSG_CHECKING([which system unit file directory to use])
     PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir])
     AC_MSG_RESULT([${systemdsystemunitdir}])
     test x"$systemdsystemunitdir" != x""
     return $?
 }
 
 #
 # Fix the defaults of certain built-in variables so they can be used in our
 # custom argument defaults
 #
 
 AC_MSG_NOTICE([Sanitizing prefix: ${prefix}])
 AS_IF([test x"$prefix" = x"NONE"],
       [
           prefix=/usr
           dnl Fix default variables - "prefix" variable if not specified
           AS_IF([test x"$localstatedir" = x"\${prefix}/var"],
                 [localstatedir="/var"])
           AS_IF([test x"$sysconfdir" = x"\${prefix}/etc"],
                 [sysconfdir="/etc"])
       ])
 
 AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}])
 AS_CASE([$exec_prefix],
         [prefix|NONE], [exec_prefix=$prefix])
 
 AC_MSG_NOTICE([Sanitizing libdir: ${libdir}])
 AS_CASE([$libdir],
         [prefix|NONE], [
             AC_MSG_CHECKING([which lib directory to use])
             for aDir in lib64 lib
             do
                 trydir="${exec_prefix}/${aDir}"
                 AS_IF([test -d ${trydir}],
                       [
                           libdir=${trydir}
                           break
                       ])
             done
             AC_MSG_RESULT([$libdir])
         ])
 
 dnl ===============================================
 dnl Configure Options
 dnl ===============================================
 
 dnl Actual library checks come later, but pkg-config can be used here to grab
 dnl external values to use as defaults for configure options
 
 dnl Per the autoconf docs, --enable-*/--disable-* options should control
 dnl features inherent to Pacemaker, while --with-*/--without-* options should
 dnl control the use of external software. However, --enable-*/--disable-* may
 dnl implicitly require additional external dependencies, and
 dnl --with-*/--without-* may implicitly enable or disable features, so the
 dnl line is blurry.
 dnl
 dnl We also use --with-* options for custom file, directory, and path
 dnl locations, since autoconf does not provide an option type for those.
 
 dnl --enable-* options: build process
 
 AC_ARG_ENABLE([quiet],
     [AS_HELP_STRING([--enable-quiet],
         [suppress make output unless there is an error @<:@no@:>@])]
 )
 yes_no_try "$enable_quiet" "no"
 enable_quiet=$?
 
 AC_ARG_ENABLE([fatal-warnings],
     [AS_HELP_STRING([--enable-fatal-warnings],
         [enable pedantic and fatal warnings for gcc @<:@try@:>@])],
 )
 yes_no_try "$enable_fatal_warnings" "try"
 enable_fatal_warnings=$?
 
 AC_ARG_ENABLE([hardening],
     [AS_HELP_STRING([--enable-hardening],
         [harden the resulting executables/libraries @<:@try@:>@])]
 )
 yes_no_try "$enable_hardening" "try"
 enable_hardening=$?
 
 dnl --enable-* options: features
 
 AC_ARG_ENABLE([systemd],
     [AS_HELP_STRING([--enable-systemd],
         [enable support for managing resources via systemd @<:@try@:>@])]
 )
 yes_no_try "$enable_systemd" "try"
 enable_systemd=$?
 
 AC_ARG_ENABLE([upstart],
     [AS_HELP_STRING([--enable-upstart],
         [enable support for managing resources via Upstart (deprecated) @<:@try@:>@])]
 )
 yes_no_try "$enable_upstart" "try"
 enable_upstart=$?
 
 dnl --enable-* options: features inherent to Pacemaker
 
 AC_ARG_ENABLE([compat-2.0],
     [AS_HELP_STRING([--enable-compat-2.0], m4_normalize([
         preserve certain output as it was in 2.0; this option will be
         available only for the lifetime of the 2.1 series @<:@no@:>@]))]
 )
 yes_no_try "$enable_compat_2_0" "no"
 enable_compat_2_0=$?
 AS_IF([test $enable_compat_2_0 -ne $DISABLED],
       [
           AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1],
                              [Keep certain output compatible with 2.0 release series])
           PCMK_FEATURES="$PCMK_FEATURES compat-2.0"
       ]
 )
 
 # Add an option to create symlinks at the pre-2.0.0 daemon name locations, so
 # that users and tools can continue to invoke those names directly (e.g., for
 # meta-data). This option will be removed in a future release.
 AC_ARG_ENABLE([legacy-links],
     [AS_HELP_STRING([--enable-legacy-links],
         [add symlinks for old daemon names (deprecated) @<:@no@:>@])]
 )
 yes_no_try "$enable_legacy_links" "no"
 enable_legacy_links=$?
 AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED])
 
 # AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults
 # to enabled. We override the definition of AM_NLS to flip the default and mark
 # it as experimental in the help text.
 AC_DEFUN([AM_NLS],
     [AC_MSG_CHECKING([whether NLS is requested])
     AC_ARG_ENABLE([nls],
         [AS_HELP_STRING([--enable-nls],
             [use Native Language Support (experimental)])],
         USE_NLS=$enableval, USE_NLS=no)
     AC_MSG_RESULT([$USE_NLS])
     AC_SUBST([USE_NLS])]
 )
 
 AM_GNU_GETTEXT([external])
 AM_GNU_GETTEXT_VERSION([0.18])
 
 AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"])
 
 dnl --with-* options: external software support, and custom locations
 
 dnl This argument is defined via an M4 macro so default can be a variable
 AC_DEFUN([VERSION_ARG],
     [AC_ARG_WITH([version],
         [AS_HELP_STRING([--with-version=VERSION],
             [override package version @<:@$1@:>@])],
         [ PACEMAKER_VERSION="$withval" ],
         [ PACEMAKER_VERSION="$PACKAGE_VERSION" ])]
 )
 VERSION_ARG(VERSION_NUMBER)
 
 # Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case
 # the user used --with-version. Unfortunately, this can only affect the
 # substitution variables and later uses in this file, not the config.h
 # constants, so we have to be careful to use only PACEMAKER_VERSION in C code.
 PACKAGE_VERSION=$PACEMAKER_VERSION
 VERSION=$PACEMAKER_VERSION
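 # For example, "./configure --with-version=2.1.6-custom" (a hypothetical
 # version string) makes $PACKAGE_VERSION and $VERSION here report
 # 2.1.6-custom, while config.h's PACKAGE_VERSION constant keeps the original
 # VERSION_NUMBER, which is why C code must use PACEMAKER_VERSION instead.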
 
 # Detect highest API schema version (use git if available to list managed RNGs,
 # in case there are leftover schema files from an earlier build of a different
 # version, otherwise check all RNGs)
 API_VERSION=$({ git ls-files xml/api/*.rng 2>/dev/null || ls -1 xml/api/*.rng ; } dnl
               | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' | sort -V | tail -1)
 AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"],
                    [Highest API schema version])
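 # For example, a schema file named xml/api/api-result-2.24.rng (hypothetical)
 # reduces to "2.24" via the sed expression; "sort -V | tail -1" then keeps
 # the highest such version.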
 
 # Re-run configure at next make if any RNG changes, to re-detect highest
 AC_SUBST([CONFIG_STATUS_DEPENDENCIES],
          [$(echo '$(wildcard $(top_srcdir)/xml/api/*.rng)')])
 
 CRM_DAEMON_USER=""
 AC_ARG_WITH([daemon-user],
     [AS_HELP_STRING([--with-daemon-user=USER],
         [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])],
     [ CRM_DAEMON_USER="$withval" ]
 )
 
 CRM_DAEMON_GROUP=""
 AC_ARG_WITH([daemon-group],
     [AS_HELP_STRING([--with-daemon-group=GROUP],
         [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])],
     [ CRM_DAEMON_GROUP="$withval" ]
 )
 
 BUG_URL=""
 AC_ARG_WITH([bug-url],
     [AS_HELP_STRING([--with-bug-url=URL], m4_normalize([
         address where users should submit bug reports
         @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))],
     [ BUG_URL="$withval" ]
 )
 
 dnl --with-* options: features
 
 AC_ARG_WITH([cibsecrets],
     [AS_HELP_STRING([--with-cibsecrets],
         [support separate file for CIB secrets @<:@no@:>@])]
 )
 yes_no_try "$with_cibsecrets" "no"
 with_cibsecrets=$?
 
 AC_ARG_WITH([gnutls],
     [AS_HELP_STRING([--with-gnutls],
         [support Pacemaker Remote and remote-tls-port using GnuTLS @<:@try@:>@])]
 )
 yes_no_try "$with_gnutls" "try"
 with_gnutls=$?
 
 PCMK_GNUTLS_PRIORITIES="NORMAL"
 AC_ARG_WITH([gnutls-priorities],
     [AS_HELP_STRING([--with-gnutls-priorities],
         [default GnuTLS cipher priorities @<:@NORMAL@:>@])],
     [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ]
 )
 
 AC_ARG_WITH([concurrent-fencing-default],
     [AS_HELP_STRING([--with-concurrent-fencing-default],
         [default value for concurrent-fencing cluster option @<:@false@:>@])],
 )
 AS_CASE([$with_concurrent_fencing_default],
         [""], [with_concurrent_fencing_default="false"],
         [false], [],
         [true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"],
         [AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])]
 )
 AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT],
                    ["$with_concurrent_fencing_default"],
                    [Default value for concurrent-fencing cluster option])
 
 AC_ARG_WITH([sbd-sync-default],
     [AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([
         default value used by sbd if SBD_SYNC_RESOURCE_STARTUP
         environment variable is not set @<:@false@:>@]))],
 )
 AS_CASE([$with_sbd_sync_default],
         [""], [with_sbd_sync_default=false],
         [false], [],
         [true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"],
         [AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])]
 )
 AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT],
                    [$with_sbd_sync_default],
                    [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable])
 
 AC_ARG_WITH([resource-stickiness-default],
     [AS_HELP_STRING([--with-resource-stickiness-default],
         [if positive, value to add to new CIBs as explicit resource default for resource-stickiness @<:@0@:>@])],
 )
 errmsg="Invalid value \"$with_resource_stickiness_default\" for --with-resource-stickiness-default"
 AS_CASE([$with_resource_stickiness_default],
         [0|""], [with_resource_stickiness_default="0"],
         [*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])],
         [PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"]
 )
 AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT],
                    [$with_resource_stickiness_default],
                    [Default value for resource-stickiness resource meta-attribute])
 
 AC_ARG_WITH([corosync],
     [AS_HELP_STRING([--with-corosync],
         [support the Corosync messaging and membership layer @<:@try@:>@])]
 )
 yes_no_try "$with_corosync" "try"
 with_corosync=$?
 
 dnl Get default from corosync if possible.
 PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir],
               [PCMK__COROSYNC_CONF="$PCMK__COROSYNC_CONF/corosync.conf"],
               [PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"])
 AC_ARG_WITH([corosync-conf],
     [AS_HELP_STRING([--with-corosync-conf], m4_normalize([
         location of Corosync configuration file
         @<:@value from Corosync package if available otherwise
         SYSCONFDIR/corosync/corosync.conf@:>@]))],
     [ PCMK__COROSYNC_CONF="$withval" ]
 )
 
 AC_ARG_WITH([nagios],
     [AS_HELP_STRING([--with-nagios], [support nagios resources (deprecated)])]
 )
 yes_no_try "$with_nagios" "try"
 with_nagios=$?
 
 dnl --with-* options: directory locations
 
 AC_ARG_WITH([nagios-plugin-dir],
     [AS_HELP_STRING([--with-nagios-plugin-dir=DIR],
         [directory for nagios plugins (deprecated) @<:@LIBEXECDIR/nagios/plugins@:>@])],
     [ NAGIOS_PLUGIN_DIR="$withval" ]
 )
 
 AC_ARG_WITH([nagios-metadata-dir],
     [AS_HELP_STRING([--with-nagios-metadata-dir=DIR],
         [directory for nagios plugins metadata (deprecated) @<:@DATADIR/nagios/plugins-metadata@:>@])],
     [ NAGIOS_METADATA_DIR="$withval" ]
 )
 
 INITDIR=""
 AC_ARG_WITH([initdir],
     [AS_HELP_STRING([--with-initdir=DIR],
         [directory for init (rc) scripts])],
     [ INITDIR="$withval" ]
 )
 
 systemdsystemunitdir="${systemdsystemunitdir-}"
 AC_ARG_WITH([systemdsystemunitdir],
     [AS_HELP_STRING([--with-systemdsystemunitdir=DIR],
         [directory for systemd unit files (advanced option: must match what systemd uses)])],
     [ systemdsystemunitdir="$withval" ]
 )
 
 CONFIGDIR=""
 AC_ARG_WITH([configdir],
     [AS_HELP_STRING([--with-configdir=DIR],
         [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])],
     [ CONFIGDIR="$withval" ]
 )
 
 dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users
 dnl have an older version, they can use our --with-runstatedir.
 pcmk_runstatedir=""
 AC_ARG_WITH([runstatedir],
     [AS_HELP_STRING([--with-runstatedir=DIR],
         [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])],
     [ pcmk_runstatedir="$withval" ]
 )
 
 CRM_LOG_DIR=""
 AC_ARG_WITH([logdir],
     [AS_HELP_STRING([--with-logdir=DIR],
         [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])],
     [ CRM_LOG_DIR="$withval" ]
 )
 
 CRM_BUNDLE_DIR=""
 AC_ARG_WITH([bundledir],
     [AS_HELP_STRING([--with-bundledir=DIR],
         [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])],
     [ CRM_BUNDLE_DIR="$withval" ]
 )
 
 dnl Get default from resource-agents if possible. Otherwise, the default uses
 dnl /usr/lib rather than libdir because it's determined by the OCF project and
 dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or
 dnl such, the OCF agents will be expected in their usual location. However, we
 dnl do give the user the option to override it.
 PKG_CHECK_VAR([OCF_ROOT_DIR], [resource-agents], [ocfrootdir], [],
               [OCF_ROOT_DIR="/usr/lib/ocf"])
 AC_ARG_WITH([ocfdir],
     [AS_HELP_STRING([--with-ocfdir=DIR], m4_normalize([
         OCF resource agent root directory (advanced option: changing this
         may break other cluster components unless similarly configured)
         @<:@value from resource-agents package if available otherwise
         /usr/lib/ocf@:>@]))],
     [ OCF_ROOT_DIR="$withval" ]
 )
 AC_SUBST(OCF_ROOT_DIR)
 AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"],
                    [OCF root directory for resource agents and libraries])
 
 PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [],
               [OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"])
 AC_ARG_WITH([ocfrapath],
     [AS_HELP_STRING([--with-ocfrapath=DIR], m4_normalize([
         OCF resource agent directories (colon-separated) to search
         @<:@value from resource-agents package if available otherwise
         OCFDIR/resource.d@:>@]))],
     [ OCF_RA_PATH="$withval" ]
 )
 AC_SUBST(OCF_RA_PATH)
 
 OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d"
 AC_ARG_WITH([ocfrainstalldir],
     [AS_HELP_STRING([--with-ocfrainstalldir=DIR], m4_normalize([
         OCF installation directory for Pacemaker's resource agents
         @<:@OCFDIR/resource.d@:>@]))],
     [ OCF_RA_INSTALL_DIR="$withval" ]
 )
 AC_SUBST(OCF_RA_INSTALL_DIR)
 
 dnl Get default from fence-agents if available
 PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix],
               [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"],
               [PCMK__FENCE_BINDIR="$sbindir"])
 AC_ARG_WITH([fence-bindir],
     [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([
         directory for executable fence agents @<:@value from fence-agents
         package if available otherwise SBINDIR@:>@]))],
     [ PCMK__FENCE_BINDIR="$withval" ]
 )
 AC_SUBST(PCMK__FENCE_BINDIR)
 
 dnl --with-* options: non-production testing
 
 AC_ARG_WITH([profiling],
     [AS_HELP_STRING([--with-profiling],
         [disable optimizations, for effective profiling @<:@no@:>@])]
 )
 yes_no_try "$with_profiling" "no"
 with_profiling=$?
 
 AC_ARG_WITH([coverage],
     [AS_HELP_STRING([--with-coverage],
         [disable optimizations, for effective profiling and coverage testing @<:@no@:>@])]
 )
 yes_no_try "$with_coverage" "no"
 with_coverage=$?
 
 AC_ARG_WITH([sanitizers],
   [AS_HELP_STRING([--with-sanitizers=...,...],
     [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])],
   [ SANITIZERS="$withval" ],
   [ SANITIZERS="" ])
 
 dnl Environment variable options
 
 AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
 AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
 
 AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
 AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])
 
 
 dnl ===============================================
 dnl General Processing
 dnl ===============================================
 
 AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION",
                    [Version number of this Pacemaker build])
 
 PACKAGE_SERIES=`echo $VERSION | awk -F. '{ print $1"."$2 }'`
 AC_SUBST(PACKAGE_SERIES)
 
 AC_PROG_LN_S
 AC_PROG_MKDIR_P
 
 # Check for fatal warning support
 AS_IF([test $enable_fatal_warnings -ne $DISABLED && test x"$GCC" = x"yes" && cc_supports_flag -Werror],
       [WERROR="-Werror"],
       [
           WERROR=""
           AS_CASE([$enable_fatal_warnings],
                   [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])],
                   [$OPTIONAL], [
                       AC_MSG_NOTICE([Compiler does not support fatal warnings])
                       enable_fatal_warnings=$DISABLED
                   ])
       ])
 
 AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}])
 AS_CASE([$INITDIR],
         [prefix], [INITDIR=$prefix],
         [""], [
             AC_MSG_CHECKING([which init (rc) directory to use])
             for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
                 /usr/local/etc/rc.d /etc/rc.d
             do
                 AS_IF([test -d $initdir],
                       [
                           INITDIR=$initdir
                           break
                       ])
             done
             AC_MSG_RESULT([$INITDIR])
         ])
 AC_SUBST(INITDIR)
 
 dnl Expand values of autoconf-provided directory options
 expand_path_option prefix
 expand_path_option exec_prefix
 expand_path_option bindir
 expand_path_option sbindir
 expand_path_option libexecdir
 expand_path_option datadir
 expand_path_option sysconfdir
 expand_path_option sharedstatedir
 expand_path_option localstatedir
 expand_path_option libdir
 expand_path_option includedir
 expand_path_option oldincludedir
 expand_path_option infodir
 expand_path_option mandir
 
 dnl Home-grown variables
 
 expand_path_option localedir "${datadir}/locale"
 AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs])
 
 AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"])
 expand_path_option runstatedir "${localstatedir}/run"
 AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"],
                    [Location for modifiable per-process data])
 AC_SUBST(runstatedir)
 
 expand_path_option INITDIR
 AC_DEFINE_UNQUOTED([PCMK__LSB_INIT_DIR], ["$INITDIR"],
                    [Location for LSB init scripts])
 
 expand_path_option docdir "${datadir}/doc/${PACKAGE}-${VERSION}"
 AC_SUBST(docdir)
 
 expand_path_option CONFIGDIR "${sysconfdir}/sysconfig"
 AC_SUBST(CONFIGDIR)
 
 expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf"
 AC_SUBST(PCMK__COROSYNC_CONF)
 
 expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker"
 AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
 AC_SUBST(CRM_LOG_DIR)
 
 expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles"
 AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
 AC_SUBST(CRM_BUNDLE_DIR)
 
 expand_path_option PCMK__FENCE_BINDIR
 AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR",
                    [Location for executable fence agents])
 
 expand_path_option OCF_RA_PATH
 AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"],
                     [OCF directories to search for resource agents])
 
 AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [],
       [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])])
 AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
                    [GnuTLS cipher priorities])
 AC_SUBST(PCMK_GNUTLS_PRIORITIES)
 
 AS_IF([test x"${BUG_URL}" = x""],
       [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"])
 AC_SUBST(BUG_URL)
 AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"],
                    [Where bugs should be reported])
 
 for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
     sharedstatedir localstatedir libdir includedir oldincludedir infodir \
     mandir INITDIR docdir CONFIGDIR localedir
 do
     dirname=`eval echo '${'${j}'}'`
     AS_IF([test ! -d "$dirname"],
           [AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])])
 done
 
 us_auth=
 AC_CHECK_HEADER([sys/socket.h], [
     AC_CHECK_DECL([SO_PEERCRED], [
         # Linux
         AC_CHECK_TYPE([struct ucred], [
             us_auth=peercred_ucred;
             AC_DEFINE([HAVE_UCRED], [1],
                       [Define if Unix socket auth method is
                        getsockopt(s, SO_PEERCRED, &ucred, ...)])
         ], [
             # OpenBSD
             AC_CHECK_TYPE([struct sockpeercred], [
                 us_auth=localpeercred_sockpeercred;
                 AC_DEFINE([HAVE_SOCKPEERCRED], [1],
                           [Define if Unix socket auth method is
                            getsockopt(s, SO_PEERCRED, &sockpeercred, ...)])
             ], [], [[#include <sys/socket.h>]])
         ], [[#define _GNU_SOURCE
              #include <sys/socket.h>]])
     ], [], [[#include <sys/socket.h>]])
 ])
 
 AS_IF([test -z "${us_auth}"], [
     # FreeBSD
     AC_CHECK_DECL([getpeereid], [
         us_auth=getpeereid;
         AC_DEFINE([HAVE_GETPEEREID], [1],
                   [Define if Unix socket auth method is
                    getpeereid(s, &uid, &gid)])
     ], [
         # Solaris/OpenIndiana
         AC_CHECK_DECL([getpeerucred], [
             us_auth=getpeerucred;
             AC_DEFINE([HAVE_GETPEERUCRED], [1],
                       [Define if Unix socket auth method is
                        getpeerucred(s, &ucred)])
         ], [
             AC_MSG_FAILURE([No way to authenticate a Unix socket peer])
         ], [[#include <ucred.h>]])
     ])
 ])
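 dnl At this point ${us_auth} records which peer-authentication mechanism the
 dnl platform provides (peercred_ucred on Linux, getpeereid on FreeBSD, etc.);
 dnl configure fails above if none was found.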
 
 dnl OS-based decision-making is poor autotools practice; feature-based
 dnl mechanisms are strongly preferred. Keep this section to a bare minimum;
 dnl regard as a "necessary evil".
 INIT_EXT=""
 PROCFS=0
 dnl Solaris and some *BSD versions support procfs but not files we need
 AS_CASE(["$host_os"],
         [*bsd*], [INIT_EXT=".sh"],
         [*linux*], [PROCFS=1],
         [darwin*], [
             LIBS="$LIBS -L${prefix}/lib"
             CFLAGS="$CFLAGS -I${prefix}/include"
         ])
 
 AC_SUBST(INIT_EXT)
 AM_CONDITIONAL([SUPPORT_PROCFS], [test $PROCFS -eq 1])
 AC_DEFINE_UNQUOTED([HAVE_LINUX_PROCFS], [$PROCFS],
                    [Define to 1 if procfs is supported])
 
 AS_CASE(["$host_cpu"],
         [ppc64|powerpc64], [
             AS_CASE([$CFLAGS],
                     [*powerpc64*], [],
                     [*], [AS_IF([test x"$GCC" = x"yes"], [CFLAGS="$CFLAGS -m64"])
                     ])
         ])
 
 dnl ===============================================
 dnl Program Paths
 dnl ===============================================
 
 PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
 export PATH
 
 dnl Pacemaker's executable python scripts will invoke the python specified by
 dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a
 dnl built-in list with (unversioned) "python" having precedence. To configure
 dnl Pacemaker to use a specific python interpreter version, define PYTHON
 dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6
 
 dnl Ensure PYTHON is an absolute path
 AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])])
 
 dnl Require a minimum Python version
 AM_PATH_PYTHON([3.4])
 
 AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor])
 AC_PATH_PROG([HELP2MAN], [help2man])
 AC_PATH_PROG([SPHINX], [sphinx-build])
 AC_PATH_PROG([INKSCAPE], [inkscape])
 AC_PATH_PROG([XSLTPROC], [xsltproc])
 AC_PATH_PROG([XMLCATALOG], [xmlcatalog])
 
 dnl Bash is needed for building man pages and running regression tests.
 dnl BASH is already an environment variable, so use something else.
 AC_PATH_PROG([BASH_PATH], [bash])
 AS_IF([test x"${BASH_PATH}" != x""], [],
       [AC_MSG_FAILURE([Could not find required build tool bash])])
 
 AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
 AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
 
 AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
 AS_IF([test x"${HELP2MAN}" != x""],
       [PCMK_FEATURES="$PCMK_FEATURES generated-manpages"])
 
 MANPAGE_XSLT=""
 AS_IF([test x"${XSLTPROC}" != x""],
       [
           AC_MSG_CHECKING([for DocBook-to-manpage transform])
           # first try to figure out the correct template using an xmlcatalog
           # query, resorting to an extensive (semi-deterministic) file search
           # if that fails
           DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current'
           DOCBOOK_XSL_PATH='manpages/docbook.xsl'
           MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \
                          | sed -n 's|^file://||p;q')
           AS_IF([test x"${MANPAGE_XSLT}" = x""],
                 [
                     DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
                            -type d 2>/dev/null | LC_ALL=C sort)
                     XSLT=$(basename ${DOCBOOK_XSL_PATH})
                     for d in ${DIRS}
                     do
                         AS_IF([test -f "${d}/${XSLT}"],
                               [
                                   MANPAGE_XSLT="${d}/${XSLT}"
                                   break
                               ])
                     done
                 ])
       ])
 AC_MSG_RESULT([$MANPAGE_XSLT])
 AC_SUBST(MANPAGE_XSLT)
 
 AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"")
 AS_IF([test x"${MANPAGE_XSLT}" != x""],
       [PCMK_FEATURES="$PCMK_FEATURES agent-manpages"])
 
 AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$'])
 AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x])
 AS_IF([test x"${ASCIIDOC_CONV}" != x""],
       [PCMK_FEATURES="$PCMK_FEATURES ascii-docs"])
 
 AM_CONDITIONAL([BUILD_SPHINX_DOCS],
                [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""])
 AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"])
 
 dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt
 AC_MSG_CHECKING([for GNU-compatible getopt])
 IFS_orig=$IFS
 IFS=:
 for PATH_DIR in $PATH
 do
     IFS=$IFS_orig
     GETOPT_PATH="${PATH_DIR}/getopt"
     AS_IF([test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH"],
           [
               $GETOPT_PATH -T >/dev/null 2>/dev/null
               AS_IF([test $? -eq 4], [break])
           ])
     GETOPT_PATH=""
 done
 IFS=$IFS_orig
 AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])],
       [
           AC_MSG_RESULT([no])
           AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt])
       ])
 AC_SUBST([GETOPT_PATH])
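 dnl (The "-T" probe above relies on GNU getopt exiting with status 4 in test
 dnl mode; non-GNU implementations do not, so they are skipped.)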
 
 dnl ========================================================================
 dnl checks for library functions to replace them
 dnl
 dnl     NoSuchFunctionName:
 dnl             is a dummy function which no system supplies.  It is here to make
 dnl             the system compile semi-correctly on OpenBSD, which doesn't know
 dnl             how to create an empty archive
 dnl
 dnl     scandir: Only on BSD.
 dnl             System-V systems may have it, but hidden and/or deprecated.
 dnl             A replacement function is supplied for it.
 dnl
 dnl     strerror: returns a string that corresponds to an errno.
 dnl             A replacement function is supplied for it.
 dnl
 dnl     strnlen: is a GNU function similar to strlen, but safer.
 dnl            We wrote a tolerably-fast replacement function for it.
 dnl
 dnl     strndup: is a GNU function similar to strdup, but safer.
 dnl            We wrote a tolerably-fast replacement function for it.
 
 AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup)
 
 dnl ===============================================
 dnl Libraries
 dnl ===============================================
 AC_CHECK_LIB(socket, socket)                    dnl -lsocket
 AC_CHECK_LIB(c, dlopen)                         dnl if dlopen is in libc...
 AC_CHECK_LIB(dl, dlopen)                        dnl -ldl (for Linux)
 AC_CHECK_LIB(rt, sched_getscheduler)            dnl -lrt (for Tru64)
 AC_CHECK_LIB(gnugetopt, getopt_long)            dnl -lgnugetopt (if available)
 AC_CHECK_LIB(pam, pam_start)                    dnl -lpam (if available)
 
 PKG_CHECK_MODULES([UUID], [uuid],
                   [CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}"
                    LIBS="${LIBS} ${UUID_LIBS}"])
 
 AC_CHECK_FUNCS([sched_setscheduler])
 AS_IF([test x"$ac_cv_func_sched_setscheduler" != x"yes"],
       [PC_LIBS_RT=""],
       [PC_LIBS_RT="-lrt"])
 AC_SUBST(PC_LIBS_RT)
 
 # Require minimum glib version
 PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0],
                   [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}"
                    LIBS="${LIBS} ${GLIB_LIBS}"])
 
 # Check whether high-resolution sleep function is available
 AC_CHECK_FUNCS([nanosleep usleep])
 
 #
 # Where is dlopen?
 #
 AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"],
       [LIBADD_DL=""],
       [test x"$ac_cv_lib_dl_dlopen" = x"yes"],
       [LIBADD_DL=-ldl],
       [LIBADD_DL=${lt_cv_dlopen_libs}])
 
 PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
                   [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
                    LIBS="${LIBS} ${LIBXML2_LIBS}"])
 
 REQUIRE_LIB([xslt], [xsltApplyStylesheet])
 
 dnl ========================================================================
 dnl Headers
 dnl ========================================================================
 
 # Some distributions insert #warnings into deprecated headers. If we will
 # enable fatal warnings for the build, then enable them for the header checks
 # as well, otherwise the build could fail even though the header check
 # succeeds. (We should probably be doing this in more places.)
 cc_temp_flags "$CFLAGS $WERROR"
 
 # Optional headers (inclusion of these should be conditional in C code)
 AC_CHECK_HEADERS([linux/swab.h])
 AC_CHECK_HEADERS([stddef.h])
 AC_CHECK_HEADERS([sys/signalfd.h])
 AC_CHECK_HEADERS([uuid/uuid.h])
 AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h])
 
 # Required headers
 REQUIRE_HEADER([arpa/inet.h])
 REQUIRE_HEADER([ctype.h])
 REQUIRE_HEADER([dirent.h])
 REQUIRE_HEADER([errno.h])
 REQUIRE_HEADER([glib.h])
 REQUIRE_HEADER([grp.h])
 REQUIRE_HEADER([limits.h])
 REQUIRE_HEADER([netdb.h])
 REQUIRE_HEADER([netinet/in.h])
 REQUIRE_HEADER([netinet/ip.h], [
     #include <sys/types.h>
     #include <netinet/in.h>
 ])
 REQUIRE_HEADER([pwd.h])
 REQUIRE_HEADER([signal.h])
 REQUIRE_HEADER([stdio.h])
 REQUIRE_HEADER([stdlib.h])
 REQUIRE_HEADER([string.h])
 REQUIRE_HEADER([strings.h])
 REQUIRE_HEADER([sys/ioctl.h])
 REQUIRE_HEADER([sys/param.h])
 REQUIRE_HEADER([sys/reboot.h])
 REQUIRE_HEADER([sys/resource.h])
 REQUIRE_HEADER([sys/socket.h])
 REQUIRE_HEADER([sys/stat.h])
 REQUIRE_HEADER([sys/time.h])
 REQUIRE_HEADER([sys/types.h])
 REQUIRE_HEADER([sys/utsname.h])
 REQUIRE_HEADER([sys/wait.h])
 REQUIRE_HEADER([time.h])
 REQUIRE_HEADER([unistd.h])
 REQUIRE_HEADER([libxml/xpath.h])
 REQUIRE_HEADER([libxslt/xslt.h])
 
 cc_restore_flags
 
 AC_CHECK_FUNCS([uuid_unparse], [],
                [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])])
 
 AC_CACHE_CHECK([whether __progname and __progname_full are available],
                [pf_cv_var_progname],
                [AC_LINK_IFELSE(
                    [AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
                                     [[__progname = "foo"; __progname_full = "foo bar";]])],
                    [pf_cv_var_progname="yes"],
                    [pf_cv_var_progname="no"]
                )]
               )
 AS_IF([test x"$pf_cv_var_progname" = x"yes"],
       [AC_DEFINE(HAVE_PROGNAME,1,[Define to 1 if processes can change their name])])
 
 dnl ========================================================================
 dnl Generic declarations
 dnl ========================================================================
 
 AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [], [[
     #include <time.h>
 ]])
 
 dnl ========================================================================
 dnl Unit test declarations
 dnl ========================================================================
 
 AC_CHECK_DECLS([assert_float_equal], [], [], [[
     #include <stdarg.h>
     #include <stddef.h>
     #include <setjmp.h>
     #include <cmocka.h>
 ]])
 
 cc_temp_flags "$CFLAGS -Wl,--wrap=uname"
 
 WRAPPABLE_UNAME="no"
 
 AC_MSG_CHECKING([if uname() can be wrapped])
 AC_RUN_IFELSE([AC_LANG_SOURCE([[
 #include <sys/utsname.h>
 int __wrap_uname(struct utsname *buf) {
     return 100;
 }
 int main(int argc, char **argv) {
     struct utsname x;
     return uname(&x) == 100 ? 0 : 1;
 }
 ]])],
                    [ WRAPPABLE_UNAME="yes" ], [ WRAPPABLE_UNAME="no"])
 AC_MSG_RESULT([$WRAPPABLE_UNAME])
 AM_CONDITIONAL([WRAPPABLE_UNAME], [test x"$WRAPPABLE_UNAME" = x"yes"])
 
 cc_restore_flags
 
 dnl ========================================================================
 dnl Structures
 dnl ========================================================================
 
 AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
 AC_CHECK_MEMBER([struct dirent.d_type],
     AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
     [#include <dirent.h>])
 
 dnl ========================================================================
 dnl Functions
 dnl ========================================================================
 
 REQUIRE_FUNC([getopt])
 REQUIRE_FUNC([setenv])
 REQUIRE_FUNC([unsetenv])
 REQUIRE_FUNC([vasprintf])
 
 AC_CACHE_CHECK(whether sscanf supports %m,
                pf_cv_var_sscanf,
                AC_RUN_IFELSE([AC_LANG_SOURCE([[
 #include <stdio.h>
 const char *s = "some-command-line-arg";
 int main(int argc, char **argv) {
     char *name = NULL;
     int n = sscanf(s, "%ms", &name);
     return n == 1 ? 0 : 1;
 }
 ]])],
                                  pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no"))
 
 AS_IF([test x"$pf_cv_var_sscanf" = x"yes"],
       [AC_DEFINE([HAVE_SSCANF_M], [1],
                  [Define to 1 if sscanf %m modifier is available])])
 
 dnl ========================================================================
 dnl   bzip2
 dnl ========================================================================
 REQUIRE_HEADER([bzlib.h])
 REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress])
 
 dnl ========================================================================
 dnl sighandler_t is missing from Illumos, Solaris11 systems
 dnl ========================================================================
 
 AC_MSG_CHECKING([for sighandler_t])
 AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]], [[sighandler_t *f;]])],
                   [
                       AC_MSG_RESULT([yes])
                       AC_DEFINE([HAVE_SIGHANDLER_T], [1],
                                 [Define to 1 if sighandler_t is available])
                   ],
                   [AC_MSG_RESULT([no])])
 
 dnl ========================================================================
 dnl   ncurses
 dnl ========================================================================
 dnl
 dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
 dnl Many non-Linux systems deliver "curses"; sites may add "ncurses".
 dnl
 dnl However, the source-code recommendation for both is to #include "curses.h"
 dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
 dnl
 dnl ncurses takes precedence.
 dnl
 AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h])
 
 dnl Although n-library is preferred, only look for it if the n-header was found.
 CURSESLIBS=''
 PC_NAME_CURSES=""
 PC_LIBS_CURSES=""
 AS_IF([test x"$ac_cv_header_ncurses_h" = x"yes"], [
     AC_CHECK_LIB(ncurses, printw,
                  [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
     CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
     PC_NAME_CURSES="ncurses"
 ])
 
 AS_IF([test x"$ac_cv_header_ncurses_ncurses_h" = x"yes"], [
     AC_CHECK_LIB(ncurses, printw,
                  [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
     CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
     PC_NAME_CURSES="ncurses"
 ])
 
 dnl Only look for non-n-library if there was no n-library.
 AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_h" = x"yes"], [
     AC_CHECK_LIB(curses, printw,
                  [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
     PC_LIBS_CURSES="$CURSESLIBS"
 ])
 
 dnl Only look for non-n-library if there was no n-library.
 AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_curses_h" = x"yes"], [
     AC_CHECK_LIB(curses, printw,
                  [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
     PC_LIBS_CURSES="$CURSESLIBS"
 ])
 
 AS_IF([test x"$CURSESLIBS" != x""],
       [PCMK_FEATURES="$PCMK_FEATURES ncurses"])
 
 dnl Check for printw() prototype compatibility
 AS_IF([test x"$CURSESLIBS" != x"" && cc_supports_flag -Wcast-qual], [
     ac_save_LIBS=$LIBS
     LIBS="$CURSESLIBS"
 
     # avoid broken test because of hardened build environment in Fedora 23+
     # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
     # - https://bugzilla.redhat.com/1297985
     AS_IF([cc_supports_flag -fPIC],
 	  [cc_temp_flags "-Wcast-qual $WERROR -fPIC"],
 	  [cc_temp_flags "-Wcast-qual $WERROR"])
 
     AC_MSG_CHECKING([whether curses library is compatible])
     AC_LINK_IFELSE(
         [AC_LANG_PROGRAM([
 #if defined(HAVE_NCURSES_H)
 #  include <ncurses.h>
 #elif defined(HAVE_NCURSES_NCURSES_H)
 #  include <ncurses/ncurses.h>
 #elif defined(HAVE_CURSES_H)
 #  include <curses.h>
 #endif
                          ],
                          [printw((const char *)"Test");]
         )],
         [AC_MSG_RESULT([yes])],
         [
             AC_MSG_RESULT([no])
             AC_MSG_WARN(m4_normalize([Disabling curses because the printw()
                                       function of your (n)curses library is old.
                                       If you wish to enable curses, update to a
                                       newer version (ncurses 5.4 or later is
                                       recommended, available from
                                       https://invisible-island.net/ncurses/)
                                      ]))
             AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1],
                       [Define to 1 if curses library has incompatible printw()])
         ]
     )
 
     LIBS=$ac_save_LIBS
     cc_restore_flags
 ])
 
 AC_SUBST(CURSESLIBS)
 AC_SUBST(PC_NAME_CURSES)
 AC_SUBST(PC_LIBS_CURSES)
 
 dnl ========================================================================
 dnl    Profiling and GProf
 dnl ========================================================================
 
 CFLAGS_ORIG="$CFLAGS"
 AS_IF([test $with_coverage -ne $DISABLED],
       [
         with_profiling=$REQUIRED
         PCMK_FEATURES="$PCMK_FEATURES coverage"
         CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
         dnl During linking, make sure to specify -lgcov or -coverage
       ]
 )
 
 AS_IF([test $with_profiling -ne $DISABLED],
       [
           with_profiling=$REQUIRED
           PCMK_FEATURES="$PCMK_FEATURES profile"
 
           dnl Disable various compiler optimizations
           CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin"
           dnl CFLAGS="$CFLAGS -fno-inline-functions"
           dnl CFLAGS="$CFLAGS -fno-default-inline"
           dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once"
           dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls"
 
           dnl Turn off optimization so tools can get accurate line numbers
           CFLAGS=`echo $CFLAGS | sed \
                   -e 's/-O.\ //g' \
                   -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \
                   -e 's/-D_FORTIFY_SOURCE=.\ //g'`
           CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2"
 
           AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG])
           AC_MSG_NOTICE([CFLAGS after: $CFLAGS])
       ]
 )
 AC_DEFINE_UNQUOTED([SUPPORT_PROFILING], [$with_profiling], [Support profiling])
 AM_CONDITIONAL([BUILD_PROFILING], [test "$with_profiling" = "$REQUIRED"])
 
 dnl ========================================================================
 dnl    Cluster infrastructure - LibQB
 dnl ========================================================================
 
 PKG_CHECK_MODULES(libqb, libqb >= 0.17)
 CPPFLAGS="$libqb_CFLAGS $CPPFLAGS"
 LIBS="$libqb_LIBS $LIBS"
 
 dnl libqb 2.0.5+ (2022-03)
 AC_CHECK_FUNCS([qb_ipcc_connect_async])
 
 dnl libqb 2.0.2+ (2020-10)
 AC_CHECK_FUNCS([qb_ipcc_auth_get])
 
 dnl libqb 2.0.0+ (2020-05)
 CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN])
 CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS])
 
 dnl Support Linux-HA fence agents if available
 AS_IF([test x"$cross_compiling" != x"yes"],
       [CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"])
 AC_CHECK_HEADERS([stonith/stonith.h],
                  [
                      AC_CHECK_LIB([pils], [PILLoadPlugin])
                      AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel])
                      PCMK_FEATURES="$PCMK_FEATURES lha"
                  ])
 AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"])
 
 dnl ===============================================
 dnl Variables needed for substitution
 dnl ===============================================
 CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
 AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
 AC_SUBST(CRM_SCHEMA_DIRECTORY)
 
 CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
 AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"],
 		   [Directory Pacemaker daemons should change to (without systemd, core files will go here)])
 AC_SUBST(CRM_CORE_DIR)
 
 AS_IF([test x"${CRM_DAEMON_USER}" = x""],
       [CRM_DAEMON_USER="hacluster"])
 AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
 AC_SUBST(CRM_DAEMON_USER)
 
 AS_IF([test x"${CRM_DAEMON_GROUP}" = x""],
       [CRM_DAEMON_GROUP="haclient"])
 AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
 AC_SUBST(CRM_DAEMON_GROUP)
 
 CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
 AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location for data produced by Pacemaker daemons)
 AC_SUBST(CRM_PACEMAKER_DIR)
 
 CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
 AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
 AC_SUBST(CRM_BLACKBOX_DIR)
 
 PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
 AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs)
 AC_SUBST(PE_STATE_DIR)
 
 CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
 AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
 AC_SUBST(CRM_CONFIG_DIR)
 
 CRM_DAEMON_DIR="${libexecdir}/pacemaker"
 AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
 AC_SUBST(CRM_DAEMON_DIR)
 
 CRM_STATE_DIR="${runstatedir}/crm"
 AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
                    [Where to keep state files and sockets])
 AC_SUBST(CRM_STATE_DIR)
 
 CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
 AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
 AC_SUBST(CRM_RSCTMP_DIR)
 
 PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
 AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey)
 AC_SUBST(PACEMAKER_CONFIG_DIR)
 
 AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries])
 
 AC_PATH_PROGS(GIT, git false)
 
 AC_MSG_CHECKING([build version])
 BUILD_VERSION=$Format:%h$
 AS_IF([test $BUILD_VERSION != ":%h$"],
       [AC_MSG_RESULT([$BUILD_VERSION (archive hash)])],
       [test -x $GIT && test -d .git],
       [
           BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
           AC_MSG_RESULT([$BUILD_VERSION (git hash)])
       ],
       [
           # The current directory name makes a reasonable default
           # Most generated archives will include the hash or tag
           BASE=`basename $PWD`
           BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
           AC_MSG_RESULT([$BUILD_VERSION (directory name)])
       ])
 
 AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
 AC_SUBST(BUILD_VERSION)
 
 HAVE_dbus=1
 PKG_CHECK_MODULES([DBUS], [dbus-1],
                   [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
                   [HAVE_dbus=0])
 AC_DEFINE_UNQUOTED(HAVE_DBUS, $HAVE_dbus, Support dbus)
 AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
 dnl libdbus 1.5.12+ (2012-03) / 1.6.0+ (2012-06)
 AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
 AS_IF([test $HAVE_dbus = 0],
       [PC_NAME_DBUS=""],
       [PC_NAME_DBUS="dbus-1"])
 AC_SUBST(PC_NAME_DBUS)
 
 AS_CASE([$enable_systemd],
         [$REQUIRED], [
             AS_IF([test $HAVE_dbus = 0],
                   [AC_MSG_FAILURE([Cannot support systemd resources without DBus])])
             AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
                   [AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])])
             AS_IF([check_systemdsystemunitdir], [],
                   [AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])])
         ],
         [$OPTIONAL], [
             AS_IF([test $HAVE_dbus = 0 \
                    || test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
                   [enable_systemd=$DISABLED],
                   [
                       AC_MSG_CHECKING([for systemd version (using dbus-send)])
                       ret=$({ dbus-send --system --print-reply \
                                   --dest=org.freedesktop.systemd1 \
                                   /org/freedesktop/systemd1 \
                                   org.freedesktop.DBus.Properties.Get \
                                   string:org.freedesktop.systemd1.Manager \
                                   string:Version 2>/dev/null \
                               || echo "version unavailable"; } | tail -n1)
                       # sanitize output a bit (interested just in the value, not the type);
                       # ret is intentionally unquoted so as to normalize whitespace
                       ret=$(echo ${ret} | cut -d' ' -f2-)
                       AC_MSG_RESULT([${ret}])
                       AS_IF([test x"$ret" != x"unavailable" \
                              || systemctl --version 2>/dev/null | grep -q systemd],
                             [
                                 AS_IF([check_systemdsystemunitdir],
                                       [enable_systemd=$REQUIRED],
                                       [enable_systemd=$DISABLED])
                             ],
                             [enable_systemd=$DISABLED]
                       )
                   ])
         ],
 )
 AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
 AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])],
       [
           AC_MSG_RESULT([yes])
           PCMK_FEATURES="$PCMK_FEATURES systemd"
       ]
 )
 AC_SUBST([systemdsystemunitdir])
 AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd],
                    [Support systemd resources])
 AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED])
 AC_SUBST(SUPPORT_SYSTEMD)
 
 AS_CASE([$enable_upstart],
         [$REQUIRED], [
             AS_IF([test $HAVE_dbus = 0],
                   [AC_MSG_FAILURE([Cannot support Upstart resources without DBus])])
         ],
         [$OPTIONAL], [
             AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED],
                   [
                       AC_MSG_CHECKING([for Upstart version (using dbus-send)])
                       ret=$({ dbus-send --system --print-reply \
                                   --dest=com.ubuntu.Upstart \
                                   /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
                                   string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
                               || echo "version unavailable"; } | tail -n1)
                       # sanitize output a bit (interested just in the value, not the type);
                       # ret is intentionally unquoted so as to normalize whitespace
                       ret=$(echo ${ret} | cut -d' ' -f2-)
                       AC_MSG_RESULT([${ret}])
                       AS_IF([test x"$ret" != x"unavailable" \
                              || initctl --version 2>/dev/null | grep -q upstart],
                             [enable_upstart=$REQUIRED],
                             [enable_upstart=$DISABLED]
                       )
                   ])
         ],
 )
 AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
 AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])],
       [
           AC_MSG_RESULT([yes])
           PCMK_FEATURES="$PCMK_FEATURES upstart"
       ]
 )
 AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart],
                    [Support Upstart resources])
 AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED])
 AC_SUBST(SUPPORT_UPSTART)
 
 AS_CASE([$with_nagios],
         [$REQUIRED], [
             AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
                   [AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])])
         ],
         [$OPTIONAL], [
             AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
                   [with_nagios=$DISABLED], [with_nagios=$REQUIRED])
         ]
 )
 AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"])
 AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins])
 AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED])
 
 AS_IF([test x"$NAGIOS_PLUGIN_DIR" = x""],
       [NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"])
 
 AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
 AC_SUBST(NAGIOS_PLUGIN_DIR)
 
 AS_IF([test x"$NAGIOS_METADATA_DIR" = x""],
       [NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"])
 
 AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
 AC_SUBST(NAGIOS_METADATA_DIR)
 
 STACKS=""
 CLUSTERLIBS=""
 PC_NAME_CLUSTER=""
 
 dnl ========================================================================
 dnl    Cluster stack - Corosync
 dnl ========================================================================
 
 COROSYNC_LIBS=""
 
 AS_CASE([$with_corosync],
         [$REQUIRED], [
             # These will be fatal if unavailable
             PKG_CHECK_MODULES([cpg], [libcpg])
             PKG_CHECK_MODULES([cfg], [libcfg])
             PKG_CHECK_MODULES([cmap], [libcmap])
             PKG_CHECK_MODULES([quorum], [libquorum])
             PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common])
         ],
         [$OPTIONAL], [
             PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED])
             PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED])
             PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED])
             PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED])
             PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED])
             AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED])
         ]
 )
 AS_IF([test $with_corosync -ne $DISABLED],
       [
           AC_MSG_CHECKING([for Corosync 2 or later])
           AC_MSG_RESULT([yes])
           CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS"
           CPPFLAGS="$CPPFLAGS `$PKG_CONFIG --cflags-only-I corosync`"
           COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS"
           CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
           PC_NAME_CLUSTER="$PC_NAME_CLUSTER libcfg libcmap libcorosync_common libcpg libquorum"
           STACKS="$STACKS corosync-ge-2"
 
           dnl Shutdown tracking added (back) to corosync Jan 2021
           saved_LIBS="$LIBS"
           LIBS="$LIBS $COROSYNC_LIBS"
           AC_CHECK_FUNCS([corosync_cfg_trackstart])
           LIBS="$saved_LIBS"
       ]
 )
 AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync],
                    [Support the Corosync messaging and membership layer])
 AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED])
 AC_SUBST([SUPPORT_COROSYNC])
 
 dnl
 dnl    Cluster stack - Sanity
 dnl
 
 AS_IF([test x"$STACKS" != x""], [AC_MSG_NOTICE([Supported stacks:${STACKS}])],
       [AC_MSG_FAILURE([At least one cluster stack must be supported])])
 
 PCMK_FEATURES="${PCMK_FEATURES}${STACKS}"
 
 AC_SUBST(CLUSTERLIBS)
 AC_SUBST(PC_NAME_CLUSTER)
 
 dnl ========================================================================
 dnl    CIB secrets
 dnl ========================================================================
 
 AS_IF([test $with_cibsecrets -ne $DISABLED],
       [
           with_cibsecrets=$REQUIRED
           PCMK_FEATURES="$PCMK_FEATURES cibsecrets"
           LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
           AC_DEFINE_UNQUOTED([LRM_CIBSECRETS_DIR], ["$LRM_CIBSECRETS_DIR"],
                              [Location for CIB secrets])
           AC_SUBST([LRM_CIBSECRETS_DIR])
       ]
 )
 AC_DEFINE_UNQUOTED([SUPPORT_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets])
 AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED])
 
 dnl ========================================================================
 dnl    GnuTLS
 dnl ========================================================================
 
 dnl Require GnuTLS >=2.12.0 (2011-03) for Pacemaker Remote support
 PC_NAME_GNUTLS=""
 AS_CASE([$with_gnutls],
         [$REQUIRED], [
             REQUIRE_LIB([gnutls], [gnutls_sec_param_to_pk_bits])
             REQUIRE_HEADER([gnutls/gnutls.h])
         ],
         [$OPTIONAL], [
             AC_CHECK_LIB([gnutls], [gnutls_sec_param_to_pk_bits],
                          [], [with_gnutls=$DISABLED])
             AC_CHECK_HEADERS([gnutls/gnutls.h], [], [with_gnutls=$DISABLED])
         ]
 )
 AS_IF([test $with_gnutls -ne $DISABLED],
       [
           PC_NAME_GNUTLS="gnutls"
           PCMK_FEATURES="$PCMK_FEATURES remote"
       ]
 )
 AC_SUBST([PC_NAME_GNUTLS])
 AM_CONDITIONAL([BUILD_REMOTE], [test $with_gnutls -ne $DISABLED])
 
 # --- ASAN/UBSAN/TSAN (see man gcc) ---
 # When using sanitizers, we need to pass the -fsanitize options to both
 # CFLAGS and LDFLAGS, and they must be specified first in each list, or
 # there will be runtime issues (for example, the user would have to
 # LD_PRELOAD the asan runtime for it to work properly).
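 # SANITIZERS is a comma-separated list of sanitizer names, for example
 # (a sketch, assuming the variable reaches configure from the environment
 # or from an option defined earlier in this file):
 #     SANITIZERS="asan,ubsan"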
 
 AS_IF([test -n "${SANITIZERS}"], [
   SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g')
   for SANITIZER in $SANITIZERS
   do
     AS_CASE([$SANITIZER],
             [asan|ASAN], [
                 SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address"
                 SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan"
                 PCMK_FEATURES="$PCMK_FEATURES asan"
                 REQUIRE_LIB([asan],[main])
             ],
             [ubsan|UBSAN], [
                 SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined"
                 SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan"
                 PCMK_FEATURES="$PCMK_FEATURES ubsan"
                 REQUIRE_LIB([ubsan],[main])
             ],
             [tsan|TSAN], [
                 SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread"
                 SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan"
                 PCMK_FEATURES="$PCMK_FEATURES tsan"
                 REQUIRE_LIB([tsan],[main])
             ])
   done
 ])
 
 
 dnl ========================================================================
 dnl Compiler flags
 dnl ========================================================================
 
 dnl Make sure that CFLAGS is not exported. If the user did not have CFLAGS
 dnl in their environment, then this has no effect. However, if CFLAGS was
 dnl exported from the user's environment, the new CFLAGS would also be
 dnl exported to subprocesses.
 AS_IF([export | fgrep " CFLAGS=" > /dev/null],
       [
           SAVED_CFLAGS="$CFLAGS"
           unset CFLAGS
           CFLAGS="$SAVED_CFLAGS"
           unset SAVED_CFLAGS
       ])
 
 CC_EXTRAS=""
 
 AS_IF([test x"$GCC" != x"yes"], [CFLAGS="$CFLAGS -g"], [
     CFLAGS="$CFLAGS -ggdb"
 
 dnl When we don't have diagnostic push / pull, we can't explicitly disable
 dnl checking for nonliteral formats in the places where they occur on
 dnl purpose, so we disable nonliteral format checking globally, since we
 dnl abort on warnings.
 dnl What makes things really ugly is that nonliteral format checking is
 dnl available as a separate switch in newer gcc, whereas for older gcc it
 dnl is part of -Wformat=2.
 dnl So: if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral;
 dnl if we don't have push/pull but do have -Wformat-nonliteral, we can
 dnl enable only -Wformat=2; otherwise we enable neither.
 
     gcc_diagnostic_push_pull=no
     cc_temp_flags "$CFLAGS $WERROR"
     AC_MSG_CHECKING([for gcc diagnostic push / pull])
     AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
 #pragma GCC diagnostic push
 #pragma GCC diagnostic pop
                       ]])],
                       [
                           AC_MSG_RESULT([yes])
                           gcc_diagnostic_push_pull=yes
                       ], AC_MSG_RESULT([no]))
     cc_restore_flags
 
     AS_IF([cc_supports_flag "-Wformat-nonliteral"],
           [gcc_format_nonliteral=yes],
           [gcc_format_nonliteral=no])
         
     # We had to eliminate -Wnested-externs because of libtool changes
     # Make sure to order options so that the former stand for prerequisites
     # of the latter (e.g., -Wformat-nonliteral requires -Wformat).
     EXTRA_FLAGS="-fgnu89-inline"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wall"
     EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wimplicit-fallthrough"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable"
     EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char"
 
     AS_IF([test x"$gcc_diagnostic_push_pull" = x"yes"],
           [
               AC_DEFINE([HAVE_FORMAT_NONLITERAL], [],
                         [gcc can complain about nonliterals in format])
               EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral"
           ],
           [test x"$gcc_format_nonliteral" = x"yes"],
           [EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"])
 
 # Additional warnings it might be nice to enable one day
 #                -Wshadow
 #                -Wunreachable-code
     for j in $EXTRA_FLAGS
     do
         AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"])
     done
 
     AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}])
 ])
 
 dnl
 dnl Hardening flags
 dnl
 dnl The primary control over whether to apply (targeted) hardening build
 dnl flags, and which ones, is the --{enable,disable}-hardening option passed
 dnl to ./configure:
 dnl
 dnl --enable-hardening=try (default):
 dnl     if any of the CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
 dnl     CFLAGS_HARDENED_LIB, or LDFLAGS_HARDENED_LIB environment variables
 dnl     (see below) is set and non-empty, all of these custom flags (even
 dnl     the unset ones) are used as-is; otherwise, a best effort is made to
 dnl     offer reasonably strong hardening in several categories (RELRO, PIE,
 dnl     "bind now", stack protector) according to what the selected toolchain
 dnl     can offer
 dnl
 dnl --enable-hardening:
 dnl     same effect as --enable-hardening=try, except that the environment
 dnl     variables in question are ignored (unset)
 dnl
 dnl --disable-hardening:
 dnl     do not apply any targeted hardening measures at all
 dnl
 dnl The user-injected environment variables that regulate the hardening in
 dnl default case are as follows:
 dnl
 dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
 dnl    compiler and linker flags (respectively) for daemon programs
 dnl    (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd,
 dnl    pacemaker-based, pacemaker-fenced, pacemaker-remoted,
 dnl    pacemaker-schedulerd)
 dnl
 dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
 dnl    compiler and linker flags (respectively) for libraries linked
 dnl    with the daemon programs
 dnl
 dnl Note that these are deliberately targeted variables (addressing particular
 dnl targets all over the scattered Makefiles) and have no effect outside of
 dnl their intended scope (e.g., on CLI utilities).  For a global reach,
 dnl use CFLAGS, LDFLAGS, etc. as usual.
 dnl
 dnl For guidance on the suitable flags consult, for instance:
 dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
 dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
 dnl
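 dnl As an illustrative sketch (the flag values below are examples, not a
 dnl recommendation), a fully custom invocation could look like:
 dnl     ./configure CFLAGS_HARDENED_EXE="-fPIE" \
 dnl                 LDFLAGS_HARDENED_EXE="-pie -Wl,-z,relro -Wl,-z,now"
 dnl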
 
 AS_IF([test $enable_hardening -eq $OPTIONAL],
       [
           AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0],
                 [enable_hardening=$REQUIRED],
                 [AC_MSG_NOTICE([Hardening: using custom flags from environment])]
           )
       ],
       [
           unset CFLAGS_HARDENED_EXE
           unset CFLAGS_HARDENED_LIB
           unset LDFLAGS_HARDENED_EXE
           unset LDFLAGS_HARDENED_LIB
       ]
 )
 AS_CASE([$enable_hardening],
         [$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])],
         [$REQUIRED], [
             CFLAGS_HARDENED_EXE=
             CFLAGS_HARDENED_LIB=
             LDFLAGS_HARDENED_EXE=
             LDFLAGS_HARDENED_LIB=
             relro=0
             pie=0
             bindnow=0
             stackprot="none"
             # daemons incl. libs: partial RELRO
             flag="-Wl,-z,relro"
             CC_CHECK_LDFLAGS(["${flag}"],
                              [
                                  LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
                                  LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
                                  relro=1
                              ])
             # daemons: PIE for both CFLAGS and LDFLAGS
             AS_IF([cc_supports_flag -fPIE],
                   [
                       flag="-pie"
                       CC_CHECK_LDFLAGS(["${flag}"],
                                        [
                                            CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"
                                            LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
                                            pie=1
                                        ])
                   ]
             )
             # daemons incl. libs: full RELRO if sensible + as-needed linking
             #                     so as to possibly mitigate startup performance
             #                     hit caused by excessive linking with unneeded
             #                     libraries
             AS_IF([test "${relro}" = 1 && test "${pie}" = 1],
                   [
                       flag="-Wl,-z,now"
                       CC_CHECK_LDFLAGS(["${flag}"],
                                        [
                                            LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
                                            LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
                                            bindnow=1
                                        ])
                   ]
             )
             AS_IF([test "${bindnow}" = 1],
                   [
                       flag="-Wl,--as-needed"
                       CC_CHECK_LDFLAGS(["${flag}"],
                                        [
                                            LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
                                            LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
                                        ])
                   ])
             # universal: prefer strong > all > default stack protector if possible
             flag=
             AS_IF([cc_supports_flag -fstack-protector-strong],
                   [
                       flag="-fstack-protector-strong"
                       stackprot="strong"
                   ],
                   [cc_supports_flag -fstack-protector-all],
                   [
                       flag="-fstack-protector-all"
                       stackprot="all"
                   ],
                   [cc_supports_flag -fstack-protector],
                   [
                       flag="-fstack-protector"
                       stackprot="default"
                   ]
             )
             AS_IF([test -n "${flag}"], [CC_EXTRAS="${CC_EXTRAS} ${flag}"])
             # universal: enable stack clash protection if possible
             AS_IF([cc_supports_flag -fstack-clash-protection],
                   [
                       CC_EXTRAS="${CC_EXTRAS} -fstack-clash-protection"
                       AS_IF([test "${stackprot}" = "none"],
                             [stackprot="clash-only"],
                             [stackprot="${stackprot}+clash"]
                       )
                   ]
             )
             # Log a summary
             AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test x"${stackprot}" != x"none"],
                   [AC_MSG_NOTICE(m4_normalize([Hardening:
                         relro=${relro}
                         pie=${pie}
                         bindnow=${bindnow}
                         stackprot=${stackprot}]))
                   ],
                   [AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])]
             )
         ],
 )
 
 CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS"
 LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS"
 CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE"
 LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE"
 
 NON_FATAL_CFLAGS="$CFLAGS"
 AC_SUBST(NON_FATAL_CFLAGS)
 
 dnl
 dnl We reset CFLAGS to include our warnings *after* all function
 dnl checking goes on, so that our warning flags don't keep the
 dnl AC_*FUNCS() calls above from working.  In particular, -Werror will
 dnl *always* cause us trouble if we set it before here.
 dnl
 AS_IF([test $enable_fatal_warnings -ne $DISABLED], [
     AC_MSG_NOTICE([Enabling fatal compiler warnings])
     CFLAGS="$CFLAGS $WERROR"
 ])
 AC_SUBST(CFLAGS)
 
 dnl This is useful for use in Makefiles that need to remove one specific flag
 CFLAGS_COPY="$CFLAGS"
 AC_SUBST(CFLAGS_COPY)
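 dnl For example (a sketch using GNU make's filter-out; the flag chosen is
 dnl illustrative), a Makefile.am could drop one flag like this:
 dnl     AM_CFLAGS = $(filter-out -Wwrite-strings,$(CFLAGS_COPY))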
 
 AC_SUBST(LIBADD_DL)        dnl extra flags for dynamic linking libraries
 
 AC_SUBST(LOCALE)
 
 dnl Options for cleaning up the compiler output
 AS_IF([test $enable_quiet -ne $DISABLED],
       [
           AC_MSG_NOTICE([Suppressing make details])
           QUIET_LIBTOOL_OPTS="--silent"
           QUIET_MAKE_OPTS="-s"  # POSIX compliant
       ],
       [
           QUIET_LIBTOOL_OPTS=""
           QUIET_MAKE_OPTS=""
       ]
 )
 
 dnl Put the above variables to use
 LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
 MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}"
 
 # Make features list available (sorted alphabetically, without leading space)
 PCMK_FEATURES=`echo "$PCMK_FEATURES" | sed -e 's/^ //' -e 's/ /\n/g' | sort | xargs`
 AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
 AC_SUBST(PCMK_FEATURES)
 
 AC_SUBST(CC)
 AC_SUBST(MAKEFLAGS)
 AC_SUBST(LIBTOOL)
 AC_SUBST(QUIET_LIBTOOL_OPTS)
 
 dnl Files we output that need to be executable
 CONFIG_FILES_EXEC([agents/ocf/ClusterMon],
                   [agents/ocf/Dummy],
                   [agents/ocf/HealthCPU],
                   [agents/ocf/HealthIOWait],
                   [agents/ocf/HealthSMART],
                   [agents/ocf/Stateful],
                   [agents/ocf/SysInfo],
                   [agents/ocf/attribute],
                   [agents/ocf/controld],
                   [agents/ocf/ifspeed],
                   [agents/ocf/o2cb],
                   [agents/ocf/ping],
                   [agents/ocf/remote],
                   [agents/stonith/fence_legacy],
                   [agents/stonith/fence_watchdog],
                   [cts/cts-attrd],
                   [cts/cts-cli],
                   [cts/cts-exec],
                   [cts/cts-fencing],
                   [cts/cts-regression],
                   [cts/cts-scheduler],
-                  [cts/lxc_autogen.sh],
                   [cts/benchmark/clubench],
                   [cts/lab/CTSlab.py],
                   [cts/lab/OCFIPraTest.py],
                   [cts/lab/cluster_test],
                   [cts/lab/cts],
                   [cts/lab/cts-log-watcher],
                   [cts/support/LSBDummy],
                   [cts/support/cts-support],
                   [cts/support/fence_dummy],
                   [cts/support/pacemaker-cts-dummyd],
                   [doc/abi-check],
                   [maint/bumplibs],
                   [tools/cluster-clean],
                   [tools/cluster-helper],
                   [tools/cluster-init],
                   [tools/crm_failcount],
                   [tools/crm_master],
                   [tools/crm_report],
                   [tools/crm_standby],
                   [tools/cibsecret],
                   [tools/pcmk_simtimes])
 
 dnl Other files we output
 AC_CONFIG_FILES(Makefile                                            \
                 agents/Makefile                                     \
                 agents/alerts/Makefile                              \
                 agents/ocf/Makefile                                 \
                 agents/stonith/Makefile                             \
                 cts/Makefile                                        \
                 cts/benchmark/Makefile                              \
                 cts/lab/Makefile                                    \
                 cts/scheduler/Makefile                              \
                 cts/scheduler/dot/Makefile                          \
                 cts/scheduler/exp/Makefile                          \
                 cts/scheduler/scores/Makefile                       \
                 cts/scheduler/stderr/Makefile                       \
                 cts/scheduler/summary/Makefile                      \
                 cts/scheduler/xml/Makefile                          \
                 cts/support/Makefile                                \
                 cts/support/pacemaker-cts-dummyd@.service           \
                 daemons/Makefile                                    \
                 daemons/attrd/Makefile                              \
                 daemons/based/Makefile                              \
                 daemons/controld/Makefile                           \
                 daemons/execd/Makefile                              \
                 daemons/execd/pacemaker_remote                      \
                 daemons/execd/pacemaker_remote.service              \
                 daemons/fenced/Makefile                             \
                 daemons/pacemakerd/Makefile                         \
                 daemons/pacemakerd/pacemaker.combined.upstart       \
                 daemons/pacemakerd/pacemaker.service                \
                 daemons/pacemakerd/pacemaker.upstart                \
                 daemons/schedulerd/Makefile                         \
                 devel/Makefile                                      \
                 doc/Doxyfile                                        \
                 doc/Makefile                                        \
                 doc/sphinx/Makefile                                 \
                 etc/Makefile                                        \
                 etc/init.d/pacemaker                                \
                 etc/logrotate.d/pacemaker                           \
                 etc/sysconfig/pacemaker                             \
                 include/Makefile                                    \
                 include/crm/Makefile                                \
                 include/crm/cib/Makefile                            \
                 include/crm/common/Makefile                         \
                 include/crm/cluster/Makefile                        \
                 include/crm/fencing/Makefile                        \
                 include/crm/pengine/Makefile                        \
                 include/pcmki/Makefile                              \
                 lib/Makefile                                        \
                 lib/cib/Makefile                                    \
                 lib/cluster/Makefile                                \
                 lib/common/Makefile                                 \
                 lib/common/tests/Makefile                           \
                 lib/common/tests/acl/Makefile                       \
                 lib/common/tests/agents/Makefile                    \
                 lib/common/tests/cmdline/Makefile                   \
                 lib/common/tests/flags/Makefile                     \
                 lib/common/tests/health/Makefile                    \
                 lib/common/tests/io/Makefile                        \
                 lib/common/tests/iso8601/Makefile                   \
                 lib/common/tests/lists/Makefile                     \
                 lib/common/tests/nvpair/Makefile                    \
                 lib/common/tests/operations/Makefile                \
                 lib/common/tests/options/Makefile                   \
                 lib/common/tests/output/Makefile                    \
                 lib/common/tests/procfs/Makefile                    \
                 lib/common/tests/results/Makefile                   \
                 lib/common/tests/scores/Makefile                    \
                 lib/common/tests/strings/Makefile                   \
                 lib/common/tests/utils/Makefile                     \
                 lib/common/tests/xml/Makefile                       \
                 lib/common/tests/xpath/Makefile                     \
                 lib/fencing/Makefile                                \
                 lib/gnu/Makefile                                    \
                 lib/libpacemaker.pc                                 \
                 lib/lrmd/Makefile                                   \
                 lib/pacemaker/Makefile                              \
                 lib/pacemaker.pc                                    \
                 lib/pacemaker-cib.pc                                \
                 lib/pacemaker-cluster.pc                            \
                 lib/pacemaker-fencing.pc                            \
                 lib/pacemaker-lrmd.pc                               \
                 lib/pacemaker-service.pc                            \
                 lib/pacemaker-pe_rules.pc                           \
                 lib/pacemaker-pe_status.pc                          \
                 lib/pengine/Makefile                                \
                 lib/pengine/tests/Makefile                          \
                 lib/pengine/tests/native/Makefile                   \
                 lib/pengine/tests/rules/Makefile                    \
                 lib/pengine/tests/status/Makefile                   \
                 lib/pengine/tests/unpack/Makefile                   \
                 lib/pengine/tests/utils/Makefile                    \
                 lib/services/Makefile                               \
                 maint/Makefile                                      \
                 po/Makefile.in                                      \
                 python/Makefile                                     \
                 python/setup.py                                     \
                 python/pacemaker/Makefile                           \
                 python/pacemaker/_cts/Makefile                      \
                 python/pacemaker/_cts/tests/Makefile                \
                 python/pacemaker/buildoptions.py                    \
                 python/tests/Makefile                               \
                 replace/Makefile                                    \
                 rpm/Makefile                                        \
                 tests/Makefile                                      \
                 tools/Makefile                                      \
                 tools/crm_mon.service                               \
                 tools/crm_mon.upstart                               \
                 tools/report.collector                              \
                 tools/report.common                                 \
                 xml/Makefile                                        \
                 xml/pacemaker-schemas.pc                            \
 )
 
 dnl Now process the entire list of files added by previous
 dnl  calls to AC_CONFIG_FILES()
 AC_OUTPUT()
 
 dnl *****************
 dnl Configure summary
 dnl *****************
 
 AC_MSG_NOTICE([])
 AC_MSG_NOTICE([$PACKAGE configuration:])
 AC_MSG_NOTICE([  Version                  = ${VERSION} (Build: $BUILD_VERSION)])
 AC_MSG_NOTICE([  Features                 = ${PCMK_FEATURES}])
 AC_MSG_NOTICE([])
 AC_MSG_NOTICE([  Prefix                   = ${prefix}])
 AC_MSG_NOTICE([  Executables              = ${sbindir}])
 AC_MSG_NOTICE([  Man pages                = ${mandir}])
 AC_MSG_NOTICE([  Libraries                = ${libdir}])
 AC_MSG_NOTICE([  Header files             = ${includedir}])
 AC_MSG_NOTICE([  Arch-independent files   = ${datadir}])
 AC_MSG_NOTICE([  State information        = ${localstatedir}])
 AC_MSG_NOTICE([  System configuration     = ${sysconfdir}])
 AC_MSG_NOTICE([  OCF agents               = ${OCF_ROOT_DIR}])
 AC_MSG_NOTICE([])
 AC_MSG_NOTICE([  HA group name            = ${CRM_DAEMON_GROUP}])
 AC_MSG_NOTICE([  HA user name             = ${CRM_DAEMON_USER}])
 AC_MSG_NOTICE([])
 AC_MSG_NOTICE([  CFLAGS                   = ${CFLAGS}])
 AC_MSG_NOTICE([  CFLAGS_HARDENED_EXE      = ${CFLAGS_HARDENED_EXE}])
 AC_MSG_NOTICE([  CFLAGS_HARDENED_LIB      = ${CFLAGS_HARDENED_LIB}])
 AC_MSG_NOTICE([  LDFLAGS_HARDENED_EXE     = ${LDFLAGS_HARDENED_EXE}])
 AC_MSG_NOTICE([  LDFLAGS_HARDENED_LIB     = ${LDFLAGS_HARDENED_LIB}])
 AC_MSG_NOTICE([  Libraries                = ${LIBS}])
 AC_MSG_NOTICE([  Stack Libraries          = ${CLUSTERLIBS}])
 AC_MSG_NOTICE([  Unix socket auth method  = ${us_auth}])
diff --git a/cts/Makefile.am b/cts/Makefile.am
index a2e67384e7..bea57f5409 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -1,69 +1,66 @@
 #
 # Copyright 2001-2023 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 MAINTAINERCLEANFILES    = Makefile.in
 
 # Test commands and globally applicable test files should be in $(testdir),
 # and command-specific test data should be in a command-specific subdirectory.
 testdir			= $(datadir)/$(PACKAGE)/tests
 test_SCRIPTS		= cts-attrd \
 			  cts-cli		\
 			  cts-exec		\
 			  cts-fencing		\
 			  cts-regression	\
 			  cts-scheduler
 dist_test_DATA		= README.md			\
 			  valgrind-pcmk.suppressions
 
-ctsdir		= 	$(testdir)/cts
-cts_SCRIPTS		= lxc_autogen.sh
-
 clidir		= $(testdir)/cli
 dist_cli_DATA	= cli/constraints.xml 				\
 		  cli/crmadmin-cluster-remote-guest-nodes.xml	\
 		  cli/crm_diff_new.xml				\
 		  cli/crm_diff_old.xml				\
 		  cli/crm_mon.xml				\
 		  cli/crm_mon-feature_set.xml			\
 		  cli/crm_mon-partial.xml			\
 		  cli/crm_mon-rsc-maint.xml			\
 		  cli/crm_mon-T180.xml				\
 		  cli/crm_mon-unmanaged.xml			\
 		  cli/crm_resource_digests.xml			\
 		  cli/regression.acls.exp			\
 		  cli/regression.crm_mon.exp			\
 		  cli/regression.daemons.exp			\
 		  cli/regression.dates.exp			\
 		  cli/regression.error_codes.exp		\
 		  cli/regression.feature_set.exp		\
 		  cli/regression.rules.exp			\
 		  cli/regression.tools.exp			\
 		  cli/regression.upgrade.exp			\
 		  cli/regression.validity.exp			\
 		  cli/regression.access_render.exp
 
 scheduler-list:
 	@for T in "$(srcdir)"/scheduler/xml/*.xml; do       \
 		echo $$(basename $$T .xml);             \
 	done
 
 CLEANFILES	= $(builddir)/.regression.failed.diff
 
 clean-local:
 	rm -f scheduler/*/*.pe
 
 SUBDIRS	= benchmark lab scheduler support
 
 cts-support-install:
 	$(MAKE) $(AM_MAKEFLAGS) -C support cts-support
 	$(builddir)/support/cts-support install
 
 cts-support-uninstall:
 	$(MAKE) $(AM_MAKEFLAGS) -C support cts-support
 	$(builddir)/support/cts-support uninstall
diff --git a/cts/README.md b/cts/README.md
index 0ff1065bf4..95c797e812 100644
--- a/cts/README.md
+++ b/cts/README.md
@@ -1,369 +1,339 @@
 # Pacemaker Cluster Test Suite (CTS)
 
 The Cluster Test Suite (CTS) refers to all Pacemaker testing code that can be
 run in an installed environment. (Pacemaker also has unit tests that must be
 run from a source distribution.)
 
 CTS includes:
 
 * Regression tests: These test specific Pacemaker components individually (no
   integration tests). The primary front end is cts-regression in this
   directory. Run it with the --help option to see its usage.
 
   cts-regression is a wrapper for individual component regression tests also
   in this directory (cts-cli, cts-exec, cts-fencing, and cts-scheduler).
 
   The CLI and scheduler regression tests can also be run from a source
   distribution. The other regression tests can only run in an installed
   environment, and the cluster should not be running on the node running these
   tests.
 
 * The CTS lab: This is a cluster exerciser for intensively testing the behavior
   of an entire working cluster. It is primarily for developers and packagers of
   the Pacemaker source code, but it can be useful for users who wish to see how
   their cluster will react to various situations. In an installed deployment,
   the CTS lab is in the cts subdirectory of this directory; in a source
   distribution, it is in cts/lab.
 
   The CTS lab runs a randomized series of predefined tests on the cluster. CTS
   can be run against a pre-existing cluster configuration, or it can overwrite
   the existing configuration with a test configuration.
 
 * Helpers: Some of the component regression tests and the CTS lab require
   certain helpers to be installed as root. These include a dummy LSB init
   script, dummy systemd service, etc. In a source distribution, the source for
   these is in cts/support.
 
   The tests will install these as needed and uninstall them when done. This
   means that the cluster configuration created by the CTS lab will generate
   failures if started manually after the lab exits. However, the helper
   installer can be run manually to make the configuration usable, if you want
   to do your own further testing with it:
 
       /usr/libexec/pacemaker/cts-support install
 
   As you might expect, you can also remove the helpers with:
 
       /usr/libexec/pacemaker/cts-support uninstall
 
 * Cluster benchmark: The benchmark subdirectory of this directory contains some
   cluster test environment benchmarking code. It is not particularly useful for
   end users.
 
-* LXC generator: The lxc\_autogen.sh script can be used to create some guest
-  nodes for testing using LXC containers. It is not particularly useful for end
-  users. In an installed deployment, it is in the cts subdirectory of this
-  directory; in a source distribution, it is in this directory.
-
 * Valgrind suppressions: When memory-testing Pacemaker code with valgrind,
   various bugs in non-Pacemaker libraries and such can clutter the results. The
   valgrind-pcmk.suppressions file in this directory can be used with valgrind's
   --suppressions option to eliminate many of these.
 
 
 ## Using the CTS lab
 
 ### Requirements
 
 * Three or more machines (one test exerciser and at least two cluster nodes).
 
 * The test cluster nodes should be on the same subnet and have journalling
   filesystems (ext4, xfs, etc.) for all of their filesystems other than
   /boot. You also need a number of free IP addresses on that subnet if you
   intend to test IP address takeover.
 
 * The test exerciser machine doesn't need to be on the same subnet as the test
   cluster machines. Minimal demands are made on the exerciser; it just has to
   stay up during the tests.
 
 * Tracking problems is easier if all machines' clocks are closely synchronized.
   NTP does this automatically, but you can do it by hand if you want.
 
 * The account on the exerciser used to run the CTS lab (which does not need to
   be root) must be able to ssh as root to the cluster nodes without a password
   challenge. See the Mini-HOWTO at the end of this file for details about how
   to configure ssh for this.
 
 * The exerciser needs to be able to resolve all cluster node names, whether by
   DNS or /etc/hosts.
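 
   For example, if not using DNS, the exerciser's /etc/hosts might contain
   entries such as these (the addresses are placeholders):
 
       192.168.9.1   pcmk-1
       192.168.9.2   pcmk-2
       192.168.9.3   pcmk-3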
 
 * CTS is not guaranteed to run on all platforms that Pacemaker itself does.
   It calls commands such as service that may not be provided by all OSes.
 
 
 ### Preparation
 
 * Install Pacemaker, including the testing code, on all machines. The testing
   code must be the same version as the rest of Pacemaker, and the Pacemaker
   version must be the same on the exerciser and all cluster nodes.
 
   You can install from source, although many distributions package the testing
   code (named pacemaker-cts or similar). Typically, everything needed by the
   CTS lab is installed in /usr/share/pacemaker/tests/cts.
 
 * Configure the cluster layer (Corosync) on the cluster machines (*not* the
   exerciser), and verify it works. Node names used in the cluster configuration
   *must* match the hosts' names as returned by `uname -n`; they do not have to
   match the machines' fully qualified domain names.
 
 
 ### Run
 
 The primary interface to the CTS lab is the CTSlab.py executable:
 
     /usr/share/pacemaker/tests/cts/CTSlab.py [options] <number-of-tests-to-run>
 
 As part of the options, specify the cluster nodes with --nodes, for example:
 
     --nodes "pcmk-1 pcmk-2 pcmk-3"
 
 Most people will want to save the output to a file, for example:
 
     --outputfile ~/cts.log
 
 Unless you want to test a pre-existing cluster configuration, you also want
 (*warning*: with these options, any existing configuration will be lost):
 
     --clobber-cib
     --populate-resources
 
 You can test floating IP addresses (*not* already used by any host), one per
 cluster node, by specifying the first, for example:
 
     --test-ip-base 192.168.9.100
 
 Configure some sort of fencing, for example to use fence\_xvm:
 
     --stonith xvm
 
 Putting all the above together, a command line might look like:
 
     /usr/share/pacemaker/tests/cts/CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" \
         --outputfile ~/cts.log --clobber-cib --populate-resources \
         --test-ip-base 192.168.9.100 --stonith xvm 50
 
 For more options, run with the --help option.
 
 There are also a couple of wrappers for CTSlab.py that some users may find more
 convenient: cts, which is typically installed in the same place as the rest of
 the testing code; and cluster\_test, which is in the source directory and
 typically not installed.
 
 To extract the result of a particular test, run:
 
     crm_report -T $test
 
 
 ### Optional: Memory testing
 
 Pacemaker has various options for testing memory management. On cluster nodes,
 Pacemaker components use various environment variables to control these
 options. How these variables are set varies by OS, but usually they are set in
 a file such as /etc/sysconfig/pacemaker or /etc/default/pacemaker.
 
 Valgrind is a program for detecting memory management problems such as
 use-after-free errors. If you have valgrind installed, you can enable it by
 setting the following environment variables on all cluster nodes:
 
     PCMK_valgrind_enabled=pacemaker-attrd,pacemaker-based,pacemaker-controld,pacemaker-execd,pacemaker-fenced,pacemaker-schedulerd
     VALGRIND_OPTS="--leak-check=full --trace-children=no --num-callers=25
         --log-file=/var/lib/pacemaker/valgrind-%p
         --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions
         --gen-suppressions=all"
 
 If running the CTS lab with valgrind enabled on the cluster nodes, add these
 options to CTSlab.py:
 
     --valgrind-tests --valgrind-procs "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-schedulerd pacemaker-fenced"
 
 These options should only be set while specifically testing memory management,
 because they may slow down the cluster significantly, and they will disable
 writes to the CIB. If desired, you can enable valgrind on a subset of pacemaker
 components rather than all of them as listed above.
 
 Valgrind will put a text file for each process in the location specified by
 valgrind's --log-file option. See
 https://www.valgrind.org/docs/manual/mc-manual.html for explanations of the
 messages valgrind generates.
 
 Separately, the MALLOC\_PERTURB\_ and MALLOC\_CHECK\_ environment variables
 (provided by the GNU C library) and the G\_SLICE environment variable
 (provided by GLib) can be set to affect the libraries' memory management
 functions.
 
 When using valgrind, G\_SLICE should be set to "always-malloc", which helps
 valgrind track memory by always using the malloc() and free() routines
 directly. When not using valgrind, G\_SLICE can be left unset, or set to
 "debug-blocks", which enables GLib to catch many memory errors but may
 impact performance.
 
 If the MALLOC\_PERTURB\_ environment variable is set to an 8-bit integer, the C
 library will initialize all newly allocated bytes of memory to the integer
 value, and will set all newly freed bytes of memory to the bitwise inverse of
 the integer value. This helps catch uses of uninitialized or freed memory
 blocks that might otherwise go unnoticed. Example:
 
     MALLOC_PERTURB_=221
 
 If the MALLOC\_CHECK\_ environment variable is set, the C library will check for
 certain heap corruption errors. The most useful value in testing is 3, which
 will cause the library to print a message to stderr and abort execution.
 Example:
 
     MALLOC_CHECK_=3
 
 Valgrind should be enabled for either all nodes or none when used with the
 CTS lab, but the C library and GLib variables may be set differently on
 different nodes.
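 
 Putting those variables together, an excerpt from /etc/sysconfig/pacemaker
 (or /etc/default/pacemaker) on a node not running valgrind might look like
 this sketch (adjust the values to suit your testing):
 
     # catch uses of uninitialized or freed memory
     MALLOC_PERTURB_=221
     # print a message to stderr and abort on heap corruption
     MALLOC_CHECK_=3
     # let GLib catch many slice-allocator errors
     G_SLICE=debug-blocks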
 
 
 ### Optional: Remote node testing
 
 If the pacemaker-remoted daemon is installed on all cluster nodes, CTS will
 enable remote node tests.
 
 The remote node tests choose a random node, stop the cluster on it, start
 pacemaker-remoted on it, and add an ocf:pacemaker:remote resource to turn it
 into a remote node. When the test is done, CTS will turn the node back into
 a cluster node.
 
 To avoid conflicts, CTS will rename the node, prefixing the original node name
 with "remote-". For example, "pcmk-1" will become "remote-pcmk-1". These names
 do not need to be resolvable.
 
 The name change may require special fencing configuration if the fence agent
 expects the node name to be the same as its hostname. A common approach is to
 specify the "remote-" names in pcmk\_host\_list. If you use
 pcmk\_host\_list=all, CTS will expand that to all cluster nodes and their
 "remote-" names. You may additionally need a pcmk\_host\_map argument to map
 the "remote-" names to the hostnames. Example:
 
     --stonith xvm --stonith-args \
     pcmk_host_list=all,pcmk_host_map=remote-pcmk-1:pcmk-1;remote-pcmk-2:pcmk-2
 
 
 ### Optional: Remote node testing with valgrind
 
 When running the remote node tests, the Pacemaker components on the *cluster*
 nodes can be run under valgrind as described in the "Memory testing" section.
 However, pacemaker-remoted cannot be run under valgrind that way, because it is
 started by the OS's regular boot system and not by Pacemaker.
 
 Details vary by system, but the goal is to set the VALGRIND\_OPTS environment
 variable and then start pacemaker-remoted by prefixing it with the path to
 valgrind.
 
 The init script and systemd service file provided with pacemaker-remoted will
 load the pacemaker environment variables from the same location used by other
 Pacemaker components, so VALGRIND\_OPTS will be set correctly if using one of
 those.
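 
 For example, on distributions that keep those variables in
 /etc/sysconfig/pacemaker (the exact path varies by distribution), you might
 add a line such as:
 
     VALGRIND_OPTS="--leak-check=full --log-file=/var/lib/pacemaker/valgrind-%p"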
 
 For an OS using systemd, you can override the ExecStart parameter to run
 valgrind. For example:
 
     mkdir /etc/systemd/system/pacemaker_remote.service.d
     cat >/etc/systemd/system/pacemaker_remote.service.d/valgrind.conf <<EOF
     [Service]
     ExecStart=
     ExecStart=/usr/bin/valgrind /usr/sbin/pacemaker-remoted
     EOF
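 
 After creating the drop-in file, reload the systemd configuration and restart
 the service so the override takes effect:
 
     systemctl daemon-reload
     systemctl restart pacemaker_remote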
 
 
-### Optional: Container testing
-
-If the --container-tests option is given to CTSlab.py, it will enable
-testing of LXC resources (currently only the RemoteLXC test,
-which starts a remote node using an LXC container).
-
-The container tests have additional package dependencies (see the toplevel
-INSTALL.md). Also, SELinux must be enabled (in either permissive or enforcing
-mode), libvirtd must be enabled and running, and root must be able to ssh
-without a password between all cluster nodes (not just from the exerciser).
-Before running the tests, you can verify your environment with:
-
-    /usr/share/pacemaker/tests/cts/lxc_autogen.sh -v
-
-LXC tests will create two containers with hardcoded parameters: a NAT'ed bridge
-named virbr0 using the IP network 192.168.123.0/24 will be created on the
-cluster node hosting the containers; the host will be assigned
-52:54:00:A8:12:35 as the MAC address and 192.168.123.1 as the IP address.
-Each container will be assigned a random MAC address starting with 52:54:,
-the IP address 192.168.123.11 or 192.168.123.12, the hostname lxc1 or lxc2
-(which will be added to the host's /etc/hosts file), and 196MB RAM.
-
-The test will revert all of the configuration when it is done.
-
-
 ### Mini-HOWTO: Allow passwordless remote SSH connections
 
 The CTS scripts run "ssh -l root" so you don't have to do any of your testing
 logged in as root on the exerciser. Here is how to allow such connections
 without requiring a password to be entered each time:
 
 * On your test exerciser, create an SSH key if you do not already have one.
   Most commonly, SSH keys will be in your ~/.ssh directory, with the
   private key file not having an extension, and the public key file
   named the same with the extension ".pub" (for example, ~/.ssh/id\_rsa.pub).
 
   If you don't already have a key, you can create one with:
 
       ssh-keygen -t rsa
 
 * From your test exerciser, authorize your SSH public key for root on all test
   machines (both the exerciser and the cluster test machines):
 
       ssh-copy-id -i ~/.ssh/id_rsa.pub root@$MACHINE
 
   You will probably have to provide your password, and possibly say
   "yes" to some questions about accepting the identity of the test machines.
 
   The above assumes you have an RSA SSH key in the specified location;
   if you have some other type of key (DSA, ECDSA, etc.), use its file name
   in the -i option above.
 
 * To verify, try this command from the exerciser machine for each
   of your cluster machines, and for the exerciser machine itself:
 
       ssh -l root $MACHINE
 
   If this works without prompting for a password, you're in business.
   If not, look at the documentation for your version of ssh.
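 
   Running ssh with increased verbosity may also help diagnose the failure:
 
       ssh -v -l root $MACHINE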
 
 
 ## Notes on maintenance
 
 ### Scheduler tests
 
 The source `*.xml` files should preferably be kept in sync with the newest
 major schema version (major only is sufficient), since these tests are not
 meant to double as schema upgrade tests (except for some cases expressly
 designated as such).
 
 Currently, unless something goes wrong, upgrading these tests en masse is
 as simple as:
 
     cd "$(git rev-parse --show-toplevel)/cts"  # if not already
     pushd "$(git rev-parse --show-toplevel)/xml"
     ./regression.sh cts_scheduler -G
     popd
     git add --interactive .
     git commit -m 'XML: upgrade-M.N.xsl: apply on scheduler CTS test cases'
     git reset HEAD && git checkout .  # if some differences still remain
     ./cts-scheduler  # absolutely vital to check nothing got broken!
 
 Sadly, there is no proven automated way to minimize leftover instances like
 this:
 
     <primitive id="rsc1" class="ocf" provider="heartbeat" type="apache">
     </primitive>
 
 into the more canonical:
 
     <primitive id="rsc1" class="ocf" provider="heartbeat" type="apache"/>
 
 so manual editing is required; alternatively, the `--format` or `--c14n`
 options to `xmllint` may help (watch out for any other changes they
 introduce).
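 
 For example, to canonicalize a single test input (file names are
 illustrative; check the resulting diff carefully before committing):
 
     xmllint --c14n some-test.xml > some-test.xml.new
     mv some-test.xml.new some-test.xml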
 
 If the overall process gets stuck anywhere, apply common sense.
 The initial part of the above recipe can be repeated at any time to verify
 that nothing is left to upgrade, which is the desired state. Note that the
 `regression.sh` script implicitly validates both the input and the output
 whenever an upgrade takes place, so there is no need to revalidate in the
 happy case.
diff --git a/cts/lab/CTSlab.py.in b/cts/lab/CTSlab.py.in
index 2815535ec2..127ec78acb 100644
--- a/cts/lab/CTSlab.py.in
+++ b/cts/lab/CTSlab.py.in
@@ -1,135 +1,135 @@
 #!@PYTHON@
 """ Command-line interface to Pacemaker's Cluster Test Suite (CTS)
 """
 
 __copyright__ = "Copyright 2001-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import sys, signal, os
 
 pdir = os.path.dirname(sys.path[0])
 sys.path.insert(0, pdir) # So that things work from the source directory
 
 try:
     from cts.CM_corosync  import *
-    from cts.CTStests     import TestList
     from cts.CTSscenarios import *
 
     from pacemaker._cts.CTS import CtsLab
     from pacemaker._cts.audits import audit_list
     from pacemaker._cts.logging import LogFactory
+    from pacemaker._cts.tests import test_list
 except ImportError as e:
     sys.stderr.write("abort: %s\n" % e)
     sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" %
                      ' '.join(sys.path))
     sys.exit(1)
 
 # These are globals so they can be used by the signal handler.
 scenario = None
 LogFactory().add_stderr()
 
 
 def sig_handler(signum, frame) :
     LogFactory().log("Interrupted by signal %d"%signum)
     if scenario: scenario.summarize()
     if signum == 15 :
         if scenario: scenario.TearDown()
         sys.exit(1)
 
 
 def plural_s(n, uppercase=False):
     if n == 1:
         return ""
     elif uppercase:
         return "S"
     else:
         return "s"
 
 
 if __name__ == '__main__':
 
     Environment = CtsLab(sys.argv[1:])
     NumIter = Environment["iterations"]
     Tests = []
 
     # Set the signal handler
     signal.signal(15, sig_handler)
     signal.signal(10, sig_handler)
 
     # Create the Cluster Manager object
     cm = None
     if Environment["Stack"] == "corosync 2+":
         cm = crm_corosync()
         
     else:
         LogFactory().log("Unknown stack: "+Environment["stack"])
         sys.exit(1)
 
     if Environment["TruncateLog"]:
         if Environment["OutputFile"] is None:
             LogFactory().log("Ignoring truncate request because no output file specified")
         else:
             LogFactory().log("Truncating %s" % Environment["OutputFile"])
             with open(Environment["OutputFile"], "w") as outputfile:
                 outputfile.truncate(0)
 
     Audits = audit_list(cm)
 
     if Environment["ListTests"]:
-        Tests = TestList(cm, Audits)
+        Tests = test_list(cm, Audits)
         LogFactory().log("Total %d tests"%len(Tests))
         for test in Tests :
             LogFactory().log(str(test.name));
         sys.exit(0)
 
     elif len(Environment["tests"]) == 0:
-        Tests = TestList(cm, Audits)
+        Tests = test_list(cm, Audits)
 
     else:
         Chosen = Environment["tests"]
         for TestCase in Chosen:
            match = None
 
-           for test in TestList(cm, Audits):
+           for test in test_list(cm, Audits):
                if test.name == TestCase:
                    match = test
 
            if not match:
                LogFactory().log("--choose: No applicable/valid tests chosen")
                sys.exit(1)
            else:
                Tests.append(match)
 
     # Scenario selection
     if Environment["scenario"] == "basic-sanity":
         scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests)
 
     elif Environment["scenario"] == "all-once":
         NumIter = len(Tests)
         scenario = AllOnce(
             cm, [ BootCluster(Environment) ], Audits, Tests)
     elif Environment["scenario"] == "sequence":
         scenario = Sequence(
             cm, [ BootCluster(Environment) ], Audits, Tests)
     elif Environment["scenario"] == "boot":
         scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, [])
     else:
         scenario = RandomTests(
             cm, [ BootCluster(Environment) ], Audits, Tests)
 
     LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TEST" + plural_s(NumIter, True) + " ")
     LogFactory().log("Stack:                  %s (%s)" % (Environment["Stack"], Environment["Name"]))
     LogFactory().log("Schema:                 %s" % Environment["Schema"])
     LogFactory().log("Scenario:               %s" % scenario.__doc__)
     LogFactory().log("CTS Exerciser:          %s" % Environment["cts-exerciser"])
     LogFactory().log("CTS Logfile:            %s" % Environment["OutputFile"])
     LogFactory().log("Random Seed:            %s" % Environment["RandSeed"])
     LogFactory().log("Syslog variant:         %s" % Environment["syslogd"].strip())
     LogFactory().log("System log files:       %s" % Environment["LogFileName"])
     if "IPBase" in Environment:
         LogFactory().log("Base IP for resources:  %s" % Environment["IPBase"])
     LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"])
 
     Environment.dump()
     rc = Environment.run(scenario, NumIter)
     sys.exit(rc)
diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py
deleted file mode 100644
index 31d2eef4a7..0000000000
--- a/cts/lab/CTStests.py
+++ /dev/null
@@ -1,310 +0,0 @@
-""" Test-specific classes for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-#
-#        SPECIAL NOTE:
-#
-#        Tests may NOT implement any cluster-manager-specific code in them.
-#        EXTEND the ClusterManager object to provide the base capabilities
-#        the test needs if you need to do something that the current CM classes
-#        do not.  Otherwise you screw up the whole point of the object structure
-#        in CTS.
-#
-#                Thank you.
-#
-
-import re
-import time
-
-from stat import *
-
-from pacemaker import BuildOptions
-from pacemaker._cts.CTS import NodeStatus
-from pacemaker._cts.audits import AuditResource
-from pacemaker._cts.tests import *
-from pacemaker._cts.timer import Timer
-
-AllTestClasses = [ ]
-AllTestClasses.append(FlipTest)
-AllTestClasses.append(RestartTest)
-AllTestClasses.append(StonithdTest)
-AllTestClasses.append(StartOnebyOne)
-AllTestClasses.append(SimulStart)
-AllTestClasses.append(SimulStop)
-AllTestClasses.append(StopOnebyOne)
-AllTestClasses.append(RestartOnebyOne)
-AllTestClasses.append(PartialStart)
-AllTestClasses.append(StandbyTest)
-AllTestClasses.append(MaintenanceMode)
-AllTestClasses.append(ResourceRecover)
-AllTestClasses.append(ComponentFail)
-AllTestClasses.append(SplitBrainTest)
-AllTestClasses.append(Reattach)
-AllTestClasses.append(ResyncCIB)
-AllTestClasses.append(NearQuorumPointTest)
-
-
-def TestList(cm, audits):
-    result = []
-    for testclass in AllTestClasses:
-        bound_test = testclass(cm)
-        if bound_test.is_applicable():
-            bound_test.audits = audits
-            result.append(bound_test)
-    return result
-
-
-class RemoteLXC(CTSTest):
-    def __init__(self, cm):
-        CTSTest.__init__(self,cm)
-        self.name = "RemoteLXC"
-        self._start = StartTest(cm)
-        self._startall = SimulStartLite(cm)
-        self.num_containers = 2
-        self.is_container = True
-        self.fail_string = ""
-
-    def start_lxc_simple(self, node):
-
-        # restore any artifacts laying around from a previous test.
-        self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
-
-        # generate the containers, put them in the config, add some resources to them
-        pats = [ ]
-        watch = self.create_watch(pats, 120)
-        watch.set_watch()
-        pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1"))
-        pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2"))
-        pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms"))
-        pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms"))
-
-        self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
-
-        with Timer(self._logger, self.name, "remoteSimpleInit"):
-            watch.look_for_all()
-
-        if watch.unmatched:
-            self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
-            self.failed = True
-
-    def cleanup_lxc_simple(self, node):
-
-        pats = [ ]
-        # if the test failed, attempt to clean up the cib and libvirt environment
-        # as best as possible 
-        if self.failed:
-            # restore libvirt and cib
-            self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
-            return
-
-        watch = self.create_watch(pats, 120)
-        watch.set_watch()
-
-        pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1"))
-        pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2"))
-
-        self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
-
-        with Timer(self._logger, self.name, "remoteSimpleCleanup"):
-            watch.look_for_all()
-
-        if watch.unmatched:
-            self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
-            self.failed = True
-
-        # cleanup libvirt
-        self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
-
-    def __call__(self, node):
-        '''Perform the 'RemoteLXC' test. '''
-        self.incr("calls")
-
-        ret = self._startall(None)
-        if not ret:
-            return self.failure("Setup failed, start all nodes failed.")
-
-        (rc, _) = self._rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
-        if rc == 1:
-            self.log("Environment test for lxc support failed.")
-            return self.skipped()
-
-        self.start_lxc_simple(node)
-        self.cleanup_lxc_simple(node)
-
-        self.debug("Waiting for the cluster to recover")
-        self._cm.cluster_stable()
-
-        if self.failed:
-            return self.failure(self.fail_string)
-
-        return self.success()
-
-    @property
-    def errors_to_ignore(self):
-        """ Return list of errors which should be ignored """
-
-        return [ r"Updating failcount for ping",
-                 r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)",
-                 # The orphaned lxc-ms resource causes an expected transition error
-                 # that is a result of the scheduler not having knowledge that the
-                 # promotable resource used to be a clone. As a result, it looks like that
-                 # resource is running in multiple locations when it shouldn't... But in
-                 # this instance we know why this error is occurring and that it is expected.
-                 r"Calculated [Tt]ransition .*pe-error",
-                 r"Resource lxc-ms .* is active on 2 nodes attempting recovery",
-                 r"Unknown operation: fail",
-                 r"VirtualDomain.*ERROR: Unable to determine emulator" ]
-
-AllTestClasses.append(RemoteLXC)
-
-
-class RemoteBasic(RemoteDriver):
-    def __init__(self, cm):
-        RemoteDriver.__init__(self, cm)
-        self.name = "RemoteBasic"
-
-    def __call__(self, node):
-        '''Perform the 'RemoteBaremetal' test. '''
-
-        if not self.start_new_test(node):
-            return self.failure(self.fail_string)
-
-        self.test_attributes(node)
-        self.cleanup_metal(node)
-
-        self.debug("Waiting for the cluster to recover")
-        self._cm.cluster_stable()
-        if self.failed:
-            return self.failure(self.fail_string)
-
-        return self.success()
-
-AllTestClasses.append(RemoteBasic)
-
-class RemoteStonithd(RemoteDriver):
-    def __init__(self, cm):
-        RemoteDriver.__init__(self, cm)
-        self.name = "RemoteStonithd"
-
-    def __call__(self, node):
-        '''Perform the 'RemoteStonithd' test. '''
-
-        if not self.start_new_test(node):
-            return self.failure(self.fail_string)
-
-        self.fail_connection(node)
-        self.cleanup_metal(node)
-
-        self.debug("Waiting for the cluster to recover")
-        self._cm.cluster_stable()
-        if self.failed:
-            return self.failure(self.fail_string)
-
-        return self.success()
-
-    def is_applicable(self):
-        if not RemoteDriver.is_applicable(self):
-            return False
-
-        if "DoFencing" in self._env:
-            return self._env["DoFencing"]
-
-        return True
-
-    @property
-    def errors_to_ignore(self):
-        """ Return list of errors which should be ignored """
-
-        return [ r"Lost connection to Pacemaker Remote node",
-                 r"Software caused connection abort",
-                 r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
-                 r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
-                 r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
-                 r"error: Result of monitor operation for .* on remote-.*: Internal communication failure" ] + super().errors_to_ignore
-
-AllTestClasses.append(RemoteStonithd)
-
-
-class RemoteMigrate(RemoteDriver):
-    def __init__(self, cm):
-        RemoteDriver.__init__(self, cm)
-        self.name = "RemoteMigrate"
-
-    def __call__(self, node):
-        '''Perform the 'RemoteMigrate' test. '''
-
-        if not self.start_new_test(node):
-            return self.failure(self.fail_string)
-
-        self.migrate_connection(node)
-        self.cleanup_metal(node)
-
-        self.debug("Waiting for the cluster to recover")
-        self._cm.cluster_stable()
-        if self.failed:
-            return self.failure(self.fail_string)
-
-        return self.success()
-
-    def is_applicable(self):
-        if not RemoteDriver.is_applicable(self):
-            return 0
-        # This test requires at least three nodes: one to convert to a
-        # remote node, one to host the connection originally, and one
-        # to migrate the connection to.
-        if len(self._env["nodes"]) < 3:
-            return 0
-        return 1
-
-AllTestClasses.append(RemoteMigrate)
-
-
-class RemoteRscFailure(RemoteDriver):
-    def __init__(self, cm):
-        RemoteDriver.__init__(self, cm)
-        self.name = "RemoteRscFailure"
-
-    def __call__(self, node):
-        '''Perform the 'RemoteRscFailure' test. '''
-
-        if not self.start_new_test(node):
-            return self.failure(self.fail_string)
-
-        # This is an important step. We are migrating the connection
-        # before failing the resource. This verifies that the migration
-        # has properly maintained control over the remote-node.
-        self.migrate_connection(node)
-
-        self.fail_rsc(node)
-        self.cleanup_metal(node)
-
-        self.debug("Waiting for the cluster to recover")
-        self._cm.cluster_stable()
-        if self.failed:
-            return self.failure(self.fail_string)
-
-        return self.success()
-
-    @property
-    def errors_to_ignore(self):
-        """ Return list of errors which should be ignored """
-
-        return [ r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)",
-                 r"Dummy.*: No process state file found" ] + super().errors_to_ignore
-
-    def is_applicable(self):
-        if not RemoteDriver.is_applicable(self):
-            return 0
-        # This test requires at least three nodes: one to convert to a
-        # remote node, one to host the connection originally, and one
-        # to migrate the connection to.
-        if len(self._env["nodes"]) < 3:
-            return 0
-        return 1
-
-AllTestClasses.append(RemoteRscFailure)
-
-# vim:ts=4:sw=4:et:
diff --git a/cts/lab/Makefile.am b/cts/lab/Makefile.am
index f620266d65..f049ed04b9 100644
--- a/cts/lab/Makefile.am
+++ b/cts/lab/Makefile.am
@@ -1,30 +1,29 @@
 #
 # Copyright 2001-2023 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 MAINTAINERCLEANFILES    = Makefile.in
 
 noinst_SCRIPTS		= cluster_test		\
 			  OCFIPraTest.py
 
 # Commands intended to be run only via other commands
 halibdir		= $(CRM_DAEMON_DIR)
 dist_halib_SCRIPTS	= cts-log-watcher
 
 ctslibdir		= $(pythondir)/cts
 ctslib_PYTHON		= __init__.py		\
 			  CIB.py		\
 			  cib_xml.py		\
 			  ClusterManager.py	\
 			  CM_corosync.py	\
-			  CTSscenarios.py	\
-			  CTStests.py
+			  CTSscenarios.py
 
 ctsdir			= $(datadir)/$(PACKAGE)/tests/cts
 cts_SCRIPTS		= CTSlab.py		\
 			  cts
diff --git a/cts/lxc_autogen.sh.in b/cts/lxc_autogen.sh.in
deleted file mode 100644
index 195d3f9dac..0000000000
--- a/cts/lxc_autogen.sh.in
+++ /dev/null
@@ -1,545 +0,0 @@
-#!@BASH_PATH@
-#
-# Copyright 2013-2022 the Pacemaker project contributors
-#
-# The version control history for this file may have further details.
-#
-# This source code is licensed under the GNU General Public License version 2
-# or later (GPLv2+) WITHOUT ANY WARRANTY.
-#
-
-containers="2"
-download=0
-share_configs=0
-# different than default libvirt network in case this is run nested in a KVM instance
-addr="192.168.123.1"
-restore=0
-restore_pcmk=0
-restore_all=0
-generate=0
-key_gen=0
-cib=0
-anywhere=0
-add_clone=0
-verify=0
-working_dir="@CRM_PACEMAKER_DIR@/cts/lxc"
-run_dirs="/run /var/run /usr/var/run"
-
-# must be on one line b/c used inside quotes
-SSH_RSYNC_OPTS="-o UserKnownHostsFile=/dev/null -o BatchMode=yes -o StrictHostKeyChecking=no"
-
-function helptext() {
-    echo "lxc_autogen.sh - generate libvirt LXC containers for testing purposes"
-    echo ""
-    echo "Usage: lxc-autogen [options]"
-    echo ""
-    echo "Options:"
-    echo "-g, --generate         Generate libvirt LXC environment in directory this script is run from"
-    echo "-k, --key-gen          Generate Pacemaker Remote key only"
-    echo "-r, --restore-libvirt  Restore the default network and libvirt config to before this script ran"
-    echo "-p, --restore-cib      Remove CIB entries this script generated"
-    echo "-R, --restore-all      Restore both libvirt and CIB, and clean working directory"
-    echo "                       (libvirt xml files are not removed, so resource can be stopped properly)"
-    echo ""
-    echo "-A, --allow-anywhere   Allow the containers to live anywhere in the cluster"
-    echo "-a, --add-cib          Add CIB entries to create a guest node for each LXC instance"
-    echo "-C, --add-clone        Add promotable clone resource shared between LXC guest nodes"
-    echo "-d, --download-agent   Download and install latest VirtualDomain agent"
-    echo "-s, --share-configs    Synchronize on all known cluster nodes"
-    echo "-c, --containers       Specify number of containers to generate (default $containers; used with -g)"
-    echo "-n, --network          Network to override libvirt default (example: -n 192.168.123.1; used with -g)"
-    echo "-v, --verify           Verify environment is capable of running LXC"
-    echo ""
-    exit "$1"
-}
-
-while true ; do
-    case "$1" in
-    --help|-h|-\?) helptext 0;;
-    -c|--containers) containers="$2"; shift; shift;;
-    -d|--download-agent) download=1; shift;;
-    -s|--share-configs) share_configs=1; shift;;
-    -n|--network) addr="$2"; shift; shift;;
-    -r|--restore-libvirt) restore=1; shift;;
-    -p|--restore-cib) restore_pcmk=1; shift;;
-    -R|--restore-all)
-        restore_all=1
-        restore=1
-        restore_pcmk=1
-        shift;;
-    -g|--generate) generate=1; key_gen=1; shift;;
-    -k|--key-gen) key_gen=1; shift;;
-    -a|--add-cib) cib=1; shift;;
-    -A|--allow-anywhere) anywhere=1; shift;;
-    -C|--add-clone) add_clone=1; shift;;
-    -m|--add-master)
-        echo "$1 is deprecated (use -C/--add-clone instead)"
-        echo
-        add_clone=1
-        shift
-        ;;
-    -v|--verify) verify=1; shift;;
-    "") break;;
-    *) helptext 1;;
-    esac
-done
-
-if [ $verify -eq 1 ]; then
-    # verify virsh tool is available and that 
-    # we can connect to lxc driver.
-    virsh -c lxc:/// list --all > /dev/null 2>&1
-    if [ $? -ne 0 ]; then
-        echo "libvirt LXC driver must be installed (could not connect 'virsh -c lxc:///')"
-        # yum install -y libvirt-daemon-driver-lxc libvirt-daemon-lxc libvirt-login-shell
-        exit 1
-    fi
-
-    SELINUX=$(getenforce)
-    if [ "$SELINUX" != "Enforcing" ] && [ "$SELINUX" != "Permissive" ]; then
-        echo "SELINUX must be set to permissive or enforcing mode"
-        exit 1
-    fi
-
-    ps ax | grep "[l]ibvirtd"
-    if [ $? -ne 0 ]; then
-        echo "libvirtd must be running"
-        exit 1
-    fi
-
-    which rsync > /dev/null 2>&1
-    if [ $? -ne 0 ]; then
-        echo "rsync must be installed"
-    fi
-
-    which pacemaker-remoted > /dev/null 2>&1
-    if [ $? -ne 0 ]; then
-        echo "pacemaker-remoted must be installed"
-    fi
-fi
-
-#strip last digits off addr
-addr="$(echo "$addr" | awk -F. '{print $1"."$2"."$3}')"
-
-node_exec() {
-    ssh -o StrictHostKeyChecking=no \
-        -o ConnectTimeout=30 \
-        -o BatchMode=yes \
-        -l root -T "$@"
-}
-
-this_node()
-{
-    crm_node -n
-}
-
-other_nodes()
-{
-    crm_node -l | awk "\$2 != \"$(this_node)\" {print \$2}"
-}
-
-make_directory()
-{
-    # argument must be full path
-    DIR="$1"
-
-    mkdir -p "$DIR"
-    if [ $share_configs -eq 1 ]; then
-        for node in $(other_nodes); do
-            node_exec "$node" mkdir -p "$DIR"
-        done
-    fi
-}
-
-sync_file()
-{
-    TARGET="$1"
-
-    if [ $share_configs -eq 1 ]; then
-        for node in $(other_nodes); do
-            rsync -ave "ssh $SSH_RSYNC_OPTS" "$TARGET" "${node}:${TARGET}"
-        done
-    fi
-}
-
-download_agent()
-{
-    wget https://raw.github.com/ClusterLabs/resource-agents/main/heartbeat/VirtualDomain
-    chmod 755 VirtualDomain
-    mv -f VirtualDomain /usr/lib/ocf/resource.d/heartbeat/VirtualDomain
-    sync_file /usr/lib/ocf/resource.d/heartbeat/VirtualDomain
-}
-
-set_network()
-{
-    rm -f cur_network.xml
-    cat << END >> cur_network.xml
-<network>
-  <name>default</name>
-  <uuid>41ebdb84-7134-1111-a136-91f0f1119225</uuid>
-  <forward mode='nat'/>
-  <bridge name='virbr0' stp='on' delay='0' />
-  <mac address='52:54:00:A8:12:35'/>
-  <ip address='$addr.1' netmask='255.255.255.0'>
-    <dhcp>
-      <range start='$addr.2' end='$addr.254' />
-    </dhcp>
-  </ip>
-</network>
-END
-    sync_file "${working_dir}"/cur_network.xml
-}
-
-distribute_configs()
-{
-    for node in $(other_nodes); do
-        rsync -ave "ssh $SSH_RSYNC_OPTS" "${working_dir}"/lxc*.xml "${node}:${working_dir}"
-        rsync -ave "ssh $SSH_RSYNC_OPTS" "${working_dir}"/lxc*-filesystem "${node}:${working_dir}"
-    done
-}
-
-start_network()
-{
-    NODE="$1"
-
-    node_exec "$NODE" <<-EOF
-    cd "$working_dir"
-    virsh net-info default >/dev/null 2>&1
-    if [ \$? -eq 0 ]; then
-        if [ ! -f restore_default.xml ]; then
-            virsh net-dumpxml default > restore_default.xml
-        fi
-        virsh net-destroy default
-        virsh net-undefine default
-    fi
-    virsh net-define cur_network.xml
-    virsh net-start default
-    virsh net-autostart default
-EOF
-}
-
-start_network_all()
-{
-    start_network "$(this_node)"
-    if [ $share_configs -eq 1 ]; then
-        for node in $(other_nodes); do
-            start_network "$node"
-        done
-    fi
-}
-
-add_hosts_entry()
-{
-    IP="$1"
-    HNAME="$2"
-
-    echo "$IP $HNAME" >>/etc/hosts
-    if [ $share_configs -eq 1 ]; then
-        for node in $(other_nodes); do
-            node_exec "$node" "echo $IP $HNAME >>/etc/hosts"
-        done
-    fi
-}
-
-generate_key()
-{
-    if [ ! -e /etc/pacemaker/authkey ]; then
-        make_directory /etc/pacemaker
-        dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1
-        sync_file /etc/pacemaker/authkey
-    fi
-}
-
-generate()
-{
-    set_network
-
-    # Generate libvirt domains in xml
-    for (( c=1; c <= containers; c++ ))
-    do
-        # Clean any previous definition
-        rm -rf "lxc$c.xml" "lxc$c-filesystem"
-
-        # Create a basic filesystem with run directories
-        for dir in $run_dirs; do
-            mkdir -p "lxc$c-filesystem/$dir"
-        done
-
-        # Create libvirt definition
-        suffix=$((10 + c))
-        prefix="$(echo "$addr" | awk -F. '{print $1"."$2}')"
-        subnet="$(echo "$addr" | awk -F. '{print $3}')"
-        while [ $suffix -gt 255 ]; do
-            subnet=$((subnet + 1))
-            suffix=$((subnet - 255))
-        done
-        cip="$prefix.$subnet.$suffix"
-
-        cat << END >> lxc$c.xml
-<domain type='lxc'>
-  <name>lxc$c</name>
-  <memory unit='KiB'>200704</memory>
-  <os>
-    <type>exe</type>
-    <init>$working_dir/lxc$c-filesystem/launch-helper</init>
-  </os>
-  <devices>
-    <console type='pty'/>
-    <filesystem type='ram'>
-        <source usage='150528'/>
-        <target dir='/dev/shm'/>
-    </filesystem>
-END
-        for dir in $run_dirs; do
-            cat << END >> lxc$c.xml
-    <filesystem type='mount'>
-      <source dir='$working_dir/lxc$c-filesystem${dir}'/>
-      <target dir='$dir'/>
-    </filesystem>
-END
-        done
-        cat << END >> lxc$c.xml
-    <interface type='network'>
-      <mac address='52:54:$((RANDOM % 9))$((RANDOM % 9)):$((RANDOM % 9))$((RANDOM % 9)):$((RANDOM % 9))$((RANDOM % 9)):$((RANDOM % 9))$((RANDOM % 9))'/>
-      <source network='default'/>
-    </interface>
-  </devices>
-</domain>
-END
-
-        # Create CIB definition
-        rm -f "container$c.cib"
-        cat << END >> "container$c.cib"
-      <primitive class="ocf" id="container$c" provider="heartbeat" type="VirtualDomain">
-        <instance_attributes id="container$c-instance_attributes">
-          <nvpair id="container$c-instance_attributes-force_stop" name="force_stop" value="true"/>
-          <nvpair id="container$c-instance_attributes-hypervisor" name="hypervisor" value="lxc:///"/>
-          <nvpair id="container$c-instance_attributes-config" name="config" value="$working_dir/lxc$c.xml"/>
-        </instance_attributes>
-        <utilization id="container$c-utilization">
-          <nvpair id="container$c-utilization-cpu" name="cpu" value="1"/>
-          <nvpair id="container$c-utilization-hv_memory" name="hv_memory" value="100"/>
-        </utilization>
-        <meta_attributes id="container$c-meta_attributes">
-          <nvpair id="container$c-meta_attributes-remote-node" name="remote-node" value="lxc$c"/>
-        </meta_attributes>
-        <operations>
-          <op id="container$c-monitor-20s" interval="20s" name="monitor"/>
-        </operations>
-      </primitive>
-END
-
-        # Create container init
-        rm -f "lxc$c-filesystem/launch-helper"
-        cat << END >> "lxc$c-filesystem/launch-helper"
-#!@BASH_PATH@
-ip -f inet addr add "$cip/24" dev eth0
-ip link set eth0 up
-ip route add default via "$addr.1"
-hostname "lxc$c"
-df > "$working_dir/lxc$c-filesystem/disk_usage.txt"
-export PCMK_debugfile="@CRM_LOG_DIR@/pacemaker_remote_lxc$c.log"
-/usr/sbin/pacemaker-remoted
-END
-        chmod 711 "lxc$c-filesystem/launch-helper"
-
-        add_hosts_entry "$cip" "lxc$c"
-    done
-
-    # Create CIB fragment for a promotable clone resource
-    cat << END > lxc-clone.cib
-      <clone id="lxc-clone">
-        <primitive class="ocf" id="lxc-rsc" provider="pacemaker" type="Stateful">
-          <instance_attributes id="lxc-rsc-instance_attributes"/>
-          <operations>
-            <op id="lxc-rsc-monitor-interval-10s" interval="10s" name="monitor" role="Promoted" timeout="20s"/>
-            <op id="lxc-rsc-monitor-interval-11s" interval="11s" name="monitor" role="Unpromoted" timeout="20s"/>
-          </operations>
-        </primitive>
-        <meta_attributes id="lxc-clone-meta_attributes">
-          <nvpair id="lxc-clone-meta_attributes-promotable" name="promotable" value="true"/>
-          <nvpair id="lxc-clone-meta_attributes-promoted-max" name="promoted-max" value="1"/>
-          <nvpair id="lxc-clone-meta_attributes-clone-max" name="clone-max" value="$containers"/>
-        </meta_attributes>
-      </clone>
-END
-}
-
-container_names() {
-    find . -maxdepth 1 -name "lxc*.xml" -exec basename -s .xml "{}" ";"
-}
-
-apply_cib_clone()
-{
-    cibadmin -Q > cur.cib
-    export CIB_file=cur.cib
-
-    cibadmin -o resources -Mc -x lxc-clone.cib
-    for tmp in $(container_names); do
-        echo "<rsc_location id=\"lxc-clone-location-${tmp}\" node=\"${tmp}\" rsc=\"lxc-clone\" score=\"INFINITY\"/>" > tmp_constraint
-        cibadmin -o constraints -Mc -x tmp_constraint
-    done
-    # Make sure the version changes even if the content doesn't
-    cibadmin -B
-    unset CIB_file
-
-    cibadmin --replace -o configuration --xml-file cur.cib
-    rm -f cur.cib
-}
-
-apply_cib_entries()
-{
-    cibadmin -Q > cur.cib
-    export CIB_file=cur.cib
-    for tmp in container*.cib; do
-        cibadmin -o resources -Mc -x "$tmp"
-
-        remote_node="$(grep remote-node "${tmp}" | sed -n -e 's/^.*value=\"\(.*\)\".*/\1/p')"
-        if [ $anywhere -eq 0 ]; then
-            crm_resource -M -r "${tmp//\.cib/}" -H "$(this_node)"
-        fi
-        echo "<rsc_location id=\"lxc-ping-location-${remote_node}\" node=\"${remote_node}\" rsc=\"Connectivity\" score=\"-INFINITY\"/>" > tmp_constraint
-        # Ignore any failure; this constraint is just to help with CTS when the
-        # connectivity resources (which fail the guest nodes) are in use.
-        cibadmin -o constraints -Mc -x tmp_constraint > /dev/null 2>&1
-
-        for rsc in $(crm_resource -l | grep rsc_ ); do
-            echo "<rsc_location id=\"lxc-${rsc}-location-${remote_node}\" node=\"${remote_node}\" rsc=\"${rsc}\" score=\"-INFINITY\"/>" > tmp_constraint
-            cibadmin -o constraints -Mc -x tmp_constraint > /dev/null 2>&1
-        done
-
-        rm -f tmp_constraint
-    done
-
-    # Make sure the version changes even if the content doesn't
-    cibadmin -B
-
-    unset CIB_file
-
-    cibadmin --replace -o configuration --xml-file cur.cib
-    rm -f cur.cib
-}
-
-restore_cib()
-{
-    cibadmin -Q > cur.cib
-    export CIB_file=cur.cib
-
-    for tmp in $(container_names); do
-        echo "<rsc_location id=\"lxc-clone-location-${tmp}\" node=\"${tmp}\" rsc=\"lxc-clone\" score=\"INFINITY\"/>" > tmp_constraint
-        cibadmin -o constraints -D -x tmp_constraint
-        echo "<rsc_location id=\"lxc-ping-location-${tmp}\" node=\"${tmp}\" rsc=\"Connectivity\" score=\"-INFINITY\"/>" > tmp_constraint
-        cibadmin -o constraints -D -x tmp_constraint
-
-        for rsc in $(crm_resource -l | grep rsc_ ); do
-            echo "<rsc_location id=\"lxc-${rsc}-location-${tmp}\" node=\"${tmp}\" rsc=\"${rsc}\" score=\"-INFINITY\"/>" > tmp_constraint
-            cibadmin -o constraints -D -x tmp_constraint
-        done
-        rm -f tmp_constraint
-    done
-    cibadmin -o resources -D -x lxc-clone.cib
-
-    for tmp in container*.cib; do
-        tmp="${tmp//\.cib/}" 
-        crm_resource -U -r "$tmp" -H "$(this_node)"
-        crm_resource -D -r "$tmp" -t primitive
-    done
-    # Make sure the version changes even if the content doesn't
-    cibadmin -B
-    unset CIB_file
-
-    cibadmin --replace -o configuration --xml-file cur.cib
-    rm -f  cur.cib 
-
-    # Allow the cluster to stabilize before continuing
-    crm_resource --wait
-
-    # Purge nodes from caches and CIB status section
-    for tmp in $(container_names); do
-        crm_node --force --remove "$tmp"
-    done
-}
-
-restore_network()
-{
-    NODE="$1"
-
-    node_exec "$NODE" <<-EOF
-    cd "$working_dir"
-    for tmp in \$(ls lxc*.xml | sed -e 's/\.xml//g'); do
-        virsh -c lxc:/// destroy "\$tmp" >/dev/null 2>&1
-        virsh -c lxc:/// undefine "\$tmp" >/dev/null 2>&1
-        sed -i.bak "/...\....\....\..* \${tmp}/d" /etc/hosts
-    done
-    virsh net-destroy default >/dev/null 2>&1
-    virsh net-undefine default >/dev/null 2>&1
-    if [ -f restore_default.xml ]; then
-        virsh net-define restore_default.xml
-        virsh net-start default
-        rm restore_default.xml
-    fi
-EOF
-    echo "Containers destroyed and default network restored on $NODE"
-}
-
-restore_libvirt()
-{
-    restore_network "$(this_node)"
-    if [ $share_configs -eq 1 ]; then
-        for node in $(other_nodes); do
-            restore_network "$node"
-        done
-    fi
-}
-
-restore_files()
-{
-    find . -maxdepth 1 -not -name "lxc*.xml" -a -not -name . -exec rm -rf "{}" ";"
-    if [ $share_configs -eq 1 ]; then
-        for node in $(other_nodes); do
-            node_exec "$node" rm -rf \
-                "$working_dir"/lxc*-filesystem \
-                "$working_dir"/cur_network.xml
-        done
-    fi
-}
-
-make_directory "$working_dir"
-cd "$working_dir" || exit 1
-
-# Generate files as requested
-if [ $download -eq 1 ]; then
-    download_agent
-fi
-if [ $key_gen -eq 1 ]; then
-    generate_key
-fi
-if [ $generate -eq 1 ]; then
-    generate
-fi
-if [ $share_configs -eq 1 ]; then
-    distribute_configs
-fi
-if [ $generate -eq 1 ]; then
-    start_network_all
-fi
-
-# Update cluster as requested
-if [ $cib -eq 1 ]; then
-    apply_cib_entries
-fi
-if [ $add_clone -eq 1 ]; then
-    apply_cib_clone
-fi
-
-# Restore original state as requested
-if [ $restore_pcmk -eq 1 ]; then
-    restore_cib
-fi
-if [ $restore -eq 1 ]; then
-    restore_libvirt
-fi
-if [ $restore_all -eq 1 ]; then
-    restore_files
-fi
-
-# vim: set expandtab tabstop=8 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/doc/sphinx/Pacemaker_Remote/alternatives.rst b/doc/sphinx/Pacemaker_Remote/alternatives.rst
index 83ed67cafc..adbdc994e3 100644
--- a/doc/sphinx/Pacemaker_Remote/alternatives.rst
+++ b/doc/sphinx/Pacemaker_Remote/alternatives.rst
@@ -1,95 +1,90 @@
 Alternative Configurations
 --------------------------
 
 These alternative configurations may be appropriate in limited cases, such as a
 test cluster, but are not the best method in most situations. They are
 presented here for completeness and as an example of Pacemaker's flexibility
 to suit your needs.
 
 .. index::
    single: virtual machine; as cluster node
 
 Virtual Machines as Cluster Nodes
 #################################
 
 The preferred use of virtual machines in a Pacemaker cluster is as a
 cluster resource, whether opaque or as a guest node. However, it is
 possible to run the full cluster stack on a virtual node instead.
 
 This is commonly used to set up test environments; a single physical host
 (that does not participate in the cluster) runs two or more virtual machines,
 all running the full cluster stack. This can be used to simulate a
 larger cluster for testing purposes.
 
 In a production environment, fencing becomes more complicated, especially
 if the underlying hosts run any services besides the clustered VMs.
 If the VMs are not guaranteed a minimum amount of host resources,
 CPU and I/O contention can cause timing issues for cluster components.
 
 Another situation where this approach is sometimes used is when
 the cluster owner leases the VMs from a provider and does not have
 direct access to the underlying host. The main concerns in this case
 are proper fencing (usually via a custom resource agent that communicates
 with the provider's APIs) and maintaining a static IP address between reboots,
 as well as resource contention issues.
 
 .. index::
    single: virtual machine; as remote node
 
 Virtual Machines as Remote Nodes
 ################################
 
 Virtual machines may be configured following the process for remote nodes 
 rather than guest nodes (i.e., using an **ocf:pacemaker:remote** resource
 rather than letting the cluster manage the VM directly).
 
 This is mainly useful in testing, to use a single physical host to simulate a
 larger cluster involving remote nodes. Pacemaker's Cluster Test Suite (CTS)
 uses this approach to test remote node functionality.
 
 .. index::
    single: container; as guest node
    single: container; LXC
    single: container; Docker
    single: container; bundle
    single: LXC
    single: Docker
    single: bundle
 
 Containers as Guest Nodes
 #########################
 
 `Containers <https://en.wikipedia.org/wiki/Operating-system-level_virtualization>`_
 and in particular Linux containers (LXC) and Docker, have become a popular
 method of isolating services in a resource-efficient manner.
 
 The preferred means of integrating containers into Pacemaker is as a
 cluster resource, whether opaque or using Pacemaker's ``bundle`` resource type.
 
 However, it is possible to run ``pacemaker_remote`` inside a container,
 following the process for guest nodes. This is not recommended but can
 be useful, for example, in testing scenarios, to simulate a large number of
 guest nodes.
 
 The configuration process is very similar to that described for guest nodes
 using virtual machines. Key differences:
 
 * The underlying host must install the libvirt driver for the desired container
   technology -- for example, the ``libvirt-daemon-lxc`` package to get the
   `libvirt-lxc <http://libvirt.org/drvlxc.html>`_ driver for LXC containers.
 
-* Libvirt XML definitions must be generated for the containers. The
-  ``pacemaker-cts`` package includes a script for this purpose,
-  ``/usr/share/pacemaker/tests/cts/lxc_autogen.sh``. Run it with the
-  ``--help`` option for details on how to use it. It is intended for testing
-  purposes only, and hardcodes various parameters that would need to be set
-  appropriately in real usage. Of course, you can create XML definitions
-  manually, following the appropriate libvirt driver documentation.
+* Libvirt XML definitions must be created for the containers, following the
+  appropriate libvirt driver documentation.
 
 * To share the authentication key, either share the host's ``/etc/pacemaker``
   directory with the container, or copy the key into the container's
   filesystem.
 
 * The **VirtualDomain** resource for a container will need
   **force_stop="true"** and an appropriate hypervisor option,
   for example **hypervisor="lxc:///"** for LXC containers.
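 
   For example, a minimal sketch of such a resource (IDs and the config path
   are illustrative, modeled on what the removed lxc_autogen.sh script used
   to generate)::
 
     <primitive class="ocf" id="container1" provider="heartbeat" type="VirtualDomain">
       <instance_attributes id="container1-instance_attributes">
         <nvpair id="container1-force_stop" name="force_stop" value="true"/>
         <nvpair id="container1-hypervisor" name="hypervisor" value="lxc:///"/>
         <nvpair id="container1-config" name="config" value="/path/to/lxc1.xml"/>
       </instance_attributes>
     </primitive>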
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index 79266d2428..4c20efde75 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,656 +1,652 @@
 """ Test environment classes for Pacemaker's Cluster Test Suite (CTS) """
 
 __all__ = ["EnvFactory"]
 __copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import argparse
 import os
 import random
 import socket
 import sys
 import time
 
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.remote import RemoteFactory
 from pacemaker._cts.watcher import LogKind
 
 class Environment:
     """ A class for managing the CTS environment, consisting largely of processing
         and storing command line parameters
     """
 
     # pylint doesn't understand that self._rsh is callable (it stores the
     # singleton instance of RemoteExec, as returned by the getInstance method
     # of RemoteFactory).  It's possible we could fix this with type annotations,
     # but those were introduced with python 3.5 and we only support python 3.4.
     # I think we could also fix this by getting rid of the getInstance methods,
     # but that's a project for another day.  For now, just disable the warning.
     # pylint: disable=not-callable
 
     def __init__(self, args):
         """ Create a new Environment instance.  This class can be treated kind
             of like a dictionary due to the presence of typical dict functions
             like __contains__, __getitem__, and __setitem__.  However, it is not a
             dictionary so do not rely on standard dictionary behavior.
 
             Arguments:
 
             args -- A list of command line parameters, minus the program name.
                     If None, sys.argv will be used.
         """
 
         self.data = {}
         self._nodes = []
 
         # Set some defaults before processing command line arguments.  These are
         # either not set by any command line parameter, or they need a default
         # that can't be set in add_argument.
         self["DeadTime"] = 300
         self["StartTime"] = 300
         self["StableTime"] = 30
         self["tests"] = []
         self["IPagent"] = "IPaddr2"
         self["DoFencing"] = True
         self["ClobberCIB"] = False
         self["CIBfilename"] = None
         self["CIBResource"] = False
         self["LogWatcher"] = LogKind.ANY
         self["node-limit"] = 0
         self["scenario"] = "random"
 
         self.random_gen = random.Random()
 
         self._logger = LogFactory()
         self._rsh = RemoteFactory().getInstance()
         self._target = "localhost"
 
         self._seed_random()
         self._parse_args(args)
 
         if not self["ListTests"]:
             self._validate()
             self._discover()
 
     def _seed_random(self, seed=None):
         """ Initialize the random number generator with the given seed, or use
             the current time if None
         """
 
         if not seed:
             seed = int(time.time())
 
         self["RandSeed"] = seed
         self.random_gen.seed(str(seed))
 
     def dump(self):
         """ Print the current environment """
 
         keys = []
         for key in list(self.data.keys()):
             keys.append(key)
 
         keys.sort()
         for key in keys:
             s = "Environment[%s]" % key
             self._logger.debug("{key:35}: {val}".format(key=s, val=str(self[key])))
 
     def keys(self):
         """ Return a list of all environment keys stored in this instance """
 
         return list(self.data.keys())
 
     def __contains__(self, key):
         """ Does the given environment key exist? """
 
         if key == "nodes":
             return True
 
         return key in self.data
 
     def __getitem__(self, key):
         """ Return the given environment key, or None if it does not exist """
 
         if str(key) == "0":
             raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
 
         if key == "nodes":
             return self._nodes
 
         if key == "Name":
             return self._get_stack_short()
 
         return self.data.get(key)
 
     def __setitem__(self, key, value):
         """ Set the given environment key to the given value, overriding any
             previous value
         """
 
         if key == "Stack":
             self._set_stack(value)
 
         elif key == "node-limit":
             self.data[key] = value
             self._filter_nodes()
 
         elif key == "nodes":
             self._nodes = []
             for node in value:
                 # I don't think I need the IP address, etc. but this validates
                 # the node name against /etc/hosts and/or DNS, so it's a
                 # GoodThing(tm).
                 try:
                     n = node.strip()
                     socket.gethostbyname_ex(n)
                     self._nodes.append(n)
                 except:
                     self._logger.log("%s not found in DNS... aborting" % node)
                     raise
 
             self._filter_nodes()
 
         else:
             self.data[key] = value
 
     def random_node(self):
         """ Choose a random node from the cluster """
 
         return self.random_gen.choice(self["nodes"])
 
     def get(self, key, default=None):
         """ Return the value for key if key is in the environment, else default """
 
         if key == "nodes":
             return self._nodes
 
         return self.data.get(key, default)
 
     def _set_stack(self, name):
         """ Normalize the given cluster stack name """
 
         if name in ["corosync", "cs", "mcp"]:
             self.data["Stack"] = "corosync 2+"
 
         else:
             raise ValueError("Unknown stack: %s" % name)
 
     def _get_stack_short(self):
         """ Return the short name for the currently set cluster stack """
 
         if "Stack" not in self.data:
             return "unknown"
 
         if self.data["Stack"] == "corosync 2+":
             return "crm-corosync"
 
         LogFactory().log("Unknown stack: %s" % self["stack"])
         raise ValueError("Unknown stack: %s" % self["stack"])
 
     def _detect_systemd(self):
         """ Detect whether systemd is in use on the target node """
 
         if "have_systemd" not in self.data:
             (rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0)
             self["have_systemd"] = rc == 0
 
     def _detect_syslog(self):
         """ Detect the syslog variant in use on the target node """
 
         if "syslogd" not in self.data:
             if self["have_systemd"]:
                 # Systemd
                 (_, lines) = self._rsh(self._target, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
                 self["syslogd"] = lines[0].strip()
             else:
                 # SYS-V
                 (_, lines) = self._rsh(self._target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
                 self["syslogd"] = lines[0].strip()
 
             if "syslogd" not in self.data or not self["syslogd"]:
                 # default
                 self["syslogd"] = "rsyslog"
 
     def disable_service(self, node, service):
         """ Disable the given service on the given node """
 
         if self["have_systemd"]:
             # Systemd
             (rc, _) = self._rsh(node, "systemctl disable %s" % service)
             return rc
 
         # SYS-V
         (rc, _) = self._rsh(node, "chkconfig %s off" % service)
         return rc
 
     def enable_service(self, node, service):
         """ Enable the given service on the given node """
 
         if self["have_systemd"]:
             # Systemd
             (rc, _) = self._rsh(node, "systemctl enable %s" % service)
             return rc
 
         # SYS-V
         (rc, _) = self._rsh(node, "chkconfig %s on" % service)
         return rc
 
     def service_is_enabled(self, node, service):
         """ Is the given service enabled on the given node? """
 
         if self["have_systemd"]:
             # Systemd
 
             # With "systemctl is-enabled", we should check if the service is
             # explicitly "enabled" instead of the return code. For example it returns
             # 0 if the service is "static" or "indirect", but they don't really count
             # as "enabled".
             (rc, _) = self._rsh(node, "systemctl is-enabled %s | grep enabled" % service)
             return rc == 0
 
         # SYS-V
         (rc, _) = self._rsh(node, "chkconfig --list | grep -e %s.*on" % service)
         return rc == 0
 
     def _detect_at_boot(self):
         """ Detect if the cluster starts at boot """
 
         if "at-boot" not in self.data:
             self["at-boot"] = self.service_is_enabled(self._target, "corosync") \
                               or self.service_is_enabled(self._target, "pacemaker")
 
     def _detect_ip_offset(self):
         """ Detect the offset for IPaddr resources """
 
         if self["CIBResource"] and "IPBase" not in self.data:
             (_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
             network = lines[0].strip()
 
             (_, lines) = self._rsh(self._target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
 
             try:
                 self["IPBase"] = lines[0].strip()
             except (IndexError, TypeError):
                 self["IPBase"] = None
 
             if not self["IPBase"]:
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self._logger.log("Could not determine an offset for IPaddr resources.  Perhaps nmap is not installed on the nodes.")
                 self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
                 return
 
             # pylint thinks self["IPBase"] is a list, not a string, which causes it
             # to error out because a list doesn't have split().
             # pylint: disable=no-member
             if int(self["IPBase"].split('.')[3]) >= 240:
                 self._logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
                                 % (self["IPBase"], self["IPBase"].split('.')[3]))
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
     def _filter_nodes(self):
         """ If --limit-nodes is given, keep that many nodes from the front of the
             list of cluster nodes and drop the rest
         """
 
         if self["node-limit"] > 0:
             if len(self["nodes"]) > self["node-limit"]:
                 # pylint thinks self["node-limit"] is a list even though we initialize
                 # it as an int in __init__ and treat it as an int everywhere.
                 # pylint: disable=bad-string-format-type
                 self._logger.log("Limiting the number of nodes configured=%d (max=%d)"
                                 %(len(self["nodes"]), self["node-limit"]))
 
                 while len(self["nodes"]) > self["node-limit"]:
                     self["nodes"].pop(len(self["nodes"])-1)
 
     def _validate(self):
         """ Were we given all the required command line parameters? """
 
         if not self["nodes"]:
             raise ValueError("No nodes specified!")
 
     def _discover(self):
         """ Probe cluster nodes to figure out how to log and manage services """
 
         self._target = random.Random().choice(self["nodes"])
 
         exerciser = socket.gethostname()
 
         # Use the IP where possible to avoid name lookup failures
         for ip in socket.gethostbyname_ex(exerciser)[2]:
             if ip != "127.0.0.1":
                 exerciser = ip
                 break
 
         self["cts-exerciser"] = exerciser
 
         self._detect_systemd()
         self._detect_syslog()
         self._detect_at_boot()
         self._detect_ip_offset()
 
     def _parse_args(self, argv):
         """ Parse and validate command line parameters, setting the appropriate
             values in the environment dictionary.  If argv is None, use sys.argv
             instead.
         """
 
         if not argv:
             argv = sys.argv[1:]
 
         parser = argparse.ArgumentParser(epilog="%s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500" % sys.argv[0])
 
         grp1 = parser.add_argument_group("Common options")
         grp1.add_argument("-g", "--dsh-group", "--group",
                           metavar="GROUP", dest="group",
                           help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
         grp1.add_argument("-l", "--limit-nodes",
                           type=int, default=0,
                           metavar="MAX",
                           help="Only use the first MAX cluster nodes supplied with --nodes")
         grp1.add_argument("--benchmark",
                           action="store_true",
                           help="Add timing information")
         grp1.add_argument("--list", "--list-tests",
                           action="store_true", dest="list_tests",
                           help="List the valid tests")
         grp1.add_argument("--nodes",
                           metavar="NODES",
                           help="List of cluster nodes separated by whitespace")
         grp1.add_argument("--stack",
                           default="corosync",
                           metavar="STACK",
                           help="Which cluster stack is installed")
 
         grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly")
         grp2.add_argument("-L", "--logfile",
                           metavar="PATH",
                           help="Where to look for logs from cluster nodes")
         grp2.add_argument("--at-boot", "--cluster-starts-at-boot",
                           choices=["1", "0", "yes", "no"],
                           help="Does the cluster software start at boot time?")
         grp2.add_argument("--facility", "--syslog-facility",
                           default="daemon",
                           metavar="NAME",
                           help="Which syslog facility to log to")
         grp2.add_argument("--ip", "--test-ip-base",
                           metavar="IP",
                           help="Offset for generated IP address resources")
 
         grp3 = parser.add_argument_group("Options for release testing")
         grp3.add_argument("-r", "--populate-resources",
                           action="store_true",
                           help="Generate a sample configuration")
         grp3.add_argument("--choose",
                           metavar="NAME",
                           help="Run only the named test")
         grp3.add_argument("--fencing", "--stonith",
                           choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"],
                           default="1",
                           help="What fencing agent to use")
         grp3.add_argument("--once",
                           action="store_true",
                           help="Run all valid tests once")
 
         grp4 = parser.add_argument_group("Additional (less common) options")
         grp4.add_argument("-c", "--clobber-cib",
                           action="store_true",
                           help="Erase any existing configuration")
         grp4.add_argument("-y", "--yes",
                           action="store_true", dest="always_continue",
                           help="Continue to run whenever prompted")
         grp4.add_argument("--boot",
                           action="store_true",
                           help="")
         grp4.add_argument("--bsc",
                           action="store_true",
                           help="")
         grp4.add_argument("--cib-filename",
                           metavar="PATH",
                           help="Install the given CIB file to the cluster")
-        grp4.add_argument("--container-tests",
-                          action="store_true",
-                          help="Include pacemaker_remote tests that run in lxc container resources")
         grp4.add_argument("--experimental-tests",
                           action="store_true",
                           help="Include experimental tests")
         grp4.add_argument("--loop-minutes",
                           type=int, default=60,
                           help="")
         grp4.add_argument("--no-loop-tests",
                           action="store_true",
                           help="Don't run looping/time-based tests")
         grp4.add_argument("--no-unsafe-tests",
                           action="store_true",
                           help="Don't run tests that are unsafe for use with ocfs2/drbd")
         grp4.add_argument("--notification-agent",
                           metavar="PATH",
                           default="/var/lib/pacemaker/notify.sh",
                           help="Script to configure for Pacemaker alerts")
         grp4.add_argument("--notification-recipient",
                           metavar="R",
                           default="/var/lib/pacemaker/notify.log",
                           help="Recipient to pass to alert script")
         grp4.add_argument("--oprofile",
                           metavar="NODES",
                           help="List of cluster nodes to run oprofile on")
         grp4.add_argument("--outputfile",
                           metavar="PATH",
                           help="Location to write logs to")
         grp4.add_argument("--qarsh",
                           action="store_true",
                           help="Use QARSH to access nodes instead of SSH")
         grp4.add_argument("--schema",
                           metavar="SCHEMA",
                           default="pacemaker-3.0",
                           help="Create a CIB conforming to the given schema")
         grp4.add_argument("--seed",
                           metavar="SEED",
                           help="Use the given string as the random number seed")
         grp4.add_argument("--set",
                           action="append",
                           metavar="ARG",
                           default=[],
                           help="Set key=value pairs (can be specified multiple times)")
         grp4.add_argument("--stonith-args",
                           metavar="ARGS",
                           default="hostlist=all,livedangerously=yes",
                           help="")
         grp4.add_argument("--stonith-type",
                           metavar="TYPE",
                           default="external/ssh",
                           help="")
         grp4.add_argument("--trunc",
                           action="store_true", dest="truncate",
                           help="Truncate log file before starting")
         grp4.add_argument("--valgrind-procs",
                           metavar="PROCS",
                           default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd",
                           help="Run valgrind against the given space-separated list of processes")
         grp4.add_argument("--valgrind-tests",
                           action="store_true",
                           help="Include tests using valgrind")
         grp4.add_argument("--warn-inactive",
                           action="store_true",
                           help="Warn if a resource is assigned to an inactive node")
 
         parser.add_argument("iterations",
                             nargs='?',
                             type=int, default=1,
                             help="Number of tests to run")
 
         args = parser.parse_args(args=argv)
 
         # Set values on this object based on what happened with command line
         # processing.  This has to be done in several blocks.
 
         # These values can always be set.  They get a default from the add_argument
         # calls, only do one thing, and they do not have any side effects.
         self["ClobberCIB"] = args.clobber_cib
         self["ListTests"] = args.list_tests
         self["Schema"] = args.schema
         self["Stack"] = args.stack
         self["SyslogFacility"] = args.facility
         self["TruncateLog"] = args.truncate
         self["at-boot"] = args.at_boot in ["1", "yes"]
         self["benchmark"] = args.benchmark
         self["continue"] = args.always_continue
-        self["container-tests"] = args.container_tests
         self["experimental-tests"] = args.experimental_tests
         self["iterations"] = args.iterations
         self["loop-minutes"] = args.loop_minutes
         self["loop-tests"] = not args.no_loop_tests
         self["notification-agent"] = args.notification_agent
         self["notification-recipient"] = args.notification_recipient
         self["node-limit"] = args.limit_nodes
         self["stonith-params"] = args.stonith_args
         self["stonith-type"] = args.stonith_type
         self["unsafe-tests"] = not args.no_unsafe_tests
         self["valgrind-procs"] = args.valgrind_procs
         self["valgrind-tests"] = args.valgrind_tests
         self["warn-inactive"] = args.warn_inactive
 
         # Nodes and groups are mutually exclusive, so their defaults cannot be
         # set in their add_argument calls.  Additionally, the group option does
         # more than just set a value.  Here, set nodes first and then, if a
         # group is specified, override the previous nodes value.
         if args.nodes:
             self["nodes"] = args.nodes.split(" ")
         else:
             self["nodes"] = []
 
         if args.group:
             self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args.dsh_group)
             LogFactory().add_file(self["OutputFile"], "CTS")
 
             dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args.dsh_group)
 
             if os.path.isfile(dsh_file):
                 self["nodes"] = []
 
                 with open(dsh_file, "r", encoding="utf-8") as f:
                     for line in f:
                         stripped = line.strip()
 
                         if not stripped.startswith('#'):
                             self["nodes"].append(stripped)
             else:
                 print("Unknown DSH group: %s" % args.dsh_group)
 
         # Everything else either can't have a default set in an add_argument
         # call (likely because we don't want to always have a value set for it)
         # or does something fancier than just set a single value.  However,
         # order does not matter for these as long as the user doesn't provide
         # conflicting arguments on the command line, so just handle everything
         # alphabetically.
         if args.boot:
             self["scenario"] = "boot"
 
         if args.bsc:
             self["DoBSC"] = True
             self["scenario"] = "basic-sanity"
 
         if args.cib_filename:
             self["CIBfilename"] = args.cib_filename
         else:
             self["CIBfilename"] = None
 
         if args.choose:
             self["scenario"] = "sequence"
             self["tests"].append(args.choose)
 
         if args.fencing:
             if args.fencing in ["0", "no"]:
                 self["DoFencing"] = False
             else:
                 self["DoFencing"] = True
 
                 if args.fencing in ["rhcs", "virt", "xvm"]:
                     self["stonith-type"] = "fence_xvm"
 
                 elif args.fencing == "scsi":
                     self["stonith-type"] = "fence_scsi"
 
                 elif args.fencing in ["lha", "ssh"]:
                     self["stonith-params"] = "hostlist=all,livedangerously=yes"
                     self["stonith-type"] = "external/ssh"
 
                 elif args.fencing == "openstack":
                     self["stonith-type"] = "fence_openstack"
 
                     print("Obtaining OpenStack credentials from the current environment")
                     self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
                         os.environ['OS_REGION_NAME'],
                         os.environ['OS_TENANT_NAME'],
                         os.environ['OS_AUTH_URL'],
                         os.environ['OS_USERNAME'],
                         os.environ['OS_PASSWORD']
                     )
 
                 elif args.fencing == "rhevm":
                     self["stonith-type"] = "fence_rhevm"
 
                     print("Obtaining RHEV-M credentials from the current environment")
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
 
         if args.ip:
             self["CIBResource"] = True
             self["ClobberCIB"] = True
             self["IPBase"] = args.ip
 
         if args.logfile:
             self["LogAuditDisabled"] = True
             self["LogFileName"] = args.logfile
             self["LogWatcher"] = LogKind.REMOTE_FILE
         else:
             # We can't set this as the default on the parser.add_argument call
             # for this option because then args.logfile will be set, which means
             # the above branch will be taken and those other values will also be
             # set.
             self["LogFileName"] = "/var/log/messages"
 
         if args.once:
             self["scenario"] = "all-once"
 
         if args.oprofile:
             self["oprofile"] = args.oprofile.split(" ")
         else:
             self["oprofile"] = []
 
         if args.outputfile:
             self["OutputFile"] = args.outputfile
             LogFactory().add_file(self["OutputFile"])
 
         if args.populate_resources:
             self["CIBResource"] = True
             self["ClobberCIB"] = True
 
         if args.qarsh:
             self._rsh.enable_qarsh()
 
         for kv in args.set:
             # Split on the first "=" only, so values may themselves contain "="
             (name, value) = kv.split("=", 1)
             self[name] = value
             print("Setting %s = %s" % (name, value))
 
 class EnvFactory:
     """ A class for constructing a singleton instance of an Environment object """
 
     instance = None
 
     # pylint: disable=invalid-name
     def getInstance(self, args=None):
         """ Returns the previously created instance of Environment, or creates a
             new instance if one does not already exist.
         """
 
         if not EnvFactory.instance:
             EnvFactory.instance = Environment(args)
 
         return EnvFactory.instance
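 
 # A minimal usage sketch (illustrative only, not part of this module): callers
 # share one Environment through the factory instead of constructing their own.
 # The first getInstance() call parses the arguments; later calls return the
 # same object:
 #
 #   env = EnvFactory().getInstance(sys.argv[1:])
 #   assert env is EnvFactory().getInstance()
 #   print(env["nodes"], env["Schema"])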
diff --git a/python/pacemaker/_cts/tests/Makefile.am b/python/pacemaker/_cts/tests/Makefile.am
index f4354cbaff..e4b746e869 100644
--- a/python/pacemaker/_cts/tests/Makefile.am
+++ b/python/pacemaker/_cts/tests/Makefile.am
@@ -1,36 +1,40 @@
 #
 # Copyright 2023 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 MAINTAINERCLEANFILES    = Makefile.in
 
 pkgpythondir = $(pythondir)/$(PACKAGE)/_cts/tests
 
 pkgpython_PYTHON = __init__.py \
 				   componentfail.py \
 				   ctstest.py \
 				   fliptest.py \
 				   maintenancemode.py \
 				   nearquorumpointtest.py \
 				   partialstart.py \
 				   reattach.py \
+				   remotebasic.py \
 				   remotedriver.py \
+				   remotemigrate.py \
+				   remoterscfailure.py \
+				   remotestonithd.py \
 				   resourcerecover.py \
 				   restarttest.py \
 				   restartonebyone.py \
 				   resynccib.py \
 				   simulstart.py \
 				   simulstop.py \
 				   simulstartlite.py \
 				   simulstoplite.py \
 				   splitbraintest.py \
 				   standbytest.py \
 				   startonebyone.py \
 				   starttest.py \
 				   stonithdtest.py \
+				   stoponebyone.py \
 				   stoptest.py
diff --git a/python/pacemaker/_cts/tests/__init__.py b/python/pacemaker/_cts/tests/__init__.py
index eb78c4527c..9703401b94 100644
--- a/python/pacemaker/_cts/tests/__init__.py
+++ b/python/pacemaker/_cts/tests/__init__.py
@@ -1,30 +1,86 @@
 """
 Test classes for the `pacemaker._cts` package.
 """
 
 __copyright__ = "Copyright 2023 the Pacemaker project contributors"
 __license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
 
 from pacemaker._cts.tests.componentfail import ComponentFail
 from pacemaker._cts.tests.ctstest import CTSTest
 from pacemaker._cts.tests.fliptest import FlipTest
 from pacemaker._cts.tests.maintenancemode import MaintenanceMode
 from pacemaker._cts.tests.nearquorumpointtest import NearQuorumPointTest
 from pacemaker._cts.tests.partialstart import PartialStart
 from pacemaker._cts.tests.reattach import Reattach
 from pacemaker._cts.tests.restartonebyone import RestartOnebyOne
 from pacemaker._cts.tests.resourcerecover import ResourceRecover
 from pacemaker._cts.tests.restarttest import RestartTest
 from pacemaker._cts.tests.resynccib import ResyncCIB
+from pacemaker._cts.tests.remotebasic import RemoteBasic
 from pacemaker._cts.tests.remotedriver import RemoteDriver
+from pacemaker._cts.tests.remotemigrate import RemoteMigrate
+from pacemaker._cts.tests.remoterscfailure import RemoteRscFailure
+from pacemaker._cts.tests.remotestonithd import RemoteStonithd
 from pacemaker._cts.tests.simulstart import SimulStart
 from pacemaker._cts.tests.simulstop import SimulStop
 from pacemaker._cts.tests.simulstartlite import SimulStartLite
 from pacemaker._cts.tests.simulstoplite import SimulStopLite
 from pacemaker._cts.tests.splitbraintest import SplitBrainTest
 from pacemaker._cts.tests.standbytest import StandbyTest
 from pacemaker._cts.tests.starttest import StartTest
 from pacemaker._cts.tests.startonebyone import StartOnebyOne
 from pacemaker._cts.tests.stonithdtest import StonithdTest
 from pacemaker._cts.tests.stoponebyone import StopOnebyOne
 from pacemaker._cts.tests.stoptest import StopTest
+
+def test_list(cm, audits):
+    """ Return a list of test class objects that are enabled and whose
+        is_applicable methods return True.  These are the tests that
+        should be run.
+    """
+
+    # cm is a reasonable name here.
+    # pylint: disable=invalid-name
+
+    # A list of all enabled test classes, in the order that they should
+    # be run (if we're doing --once).  There are various other ways of
+    # specifying which tests should be run, in which case the order here
+    # will not matter.
+    #
+    # Note that just because a test is listed here doesn't mean it will
+    # definitely be run - is_applicable is still taken into consideration.
+    # Also note that there are other tests that are excluded from this
+    # list for various reasons.
+    enabled_test_classes = [ FlipTest,
+                             RestartTest,
+                             StonithdTest,
+                             StartOnebyOne,
+                             SimulStart,
+                             SimulStop,
+                             StopOnebyOne,
+                             RestartOnebyOne,
+                             PartialStart,
+                             StandbyTest,
+                             MaintenanceMode,
+                             ResourceRecover,
+                             ComponentFail,
+                             SplitBrainTest,
+                             Reattach,
+                             ResyncCIB,
+                             NearQuorumPointTest,
+                             RemoteBasic,
+                             RemoteStonithd,
+                             RemoteMigrate,
+                             RemoteRscFailure,
+                           ]
+
+    result = []
+
+    for testclass in enabled_test_classes:
+        bound_test = testclass(cm)
+
+        if bound_test.is_applicable():
+            bound_test.audits = audits
+            result.append(bound_test)
+
+    return result
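+
+# Hypothetical caller (assuming cm is a ClusterManager and audits is a list of
+# audit callables, both built elsewhere in CTS):
+#
+#   tests = test_list(cm, audits)
+#   for test in tests:
+#       test(node)    # CTSTest objects are invoked via __call__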
diff --git a/python/pacemaker/_cts/tests/ctstest.py b/python/pacemaker/_cts/tests/ctstest.py
index f2fe8bf710..6f98b50629 100644
--- a/python/pacemaker/_cts/tests/ctstest.py
+++ b/python/pacemaker/_cts/tests/ctstest.py
@@ -1,290 +1,286 @@
 """ Base classes for CTS tests """
 
 __all__ = ["CTSTest"]
 __copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import re
 
 from pacemaker._cts.audits import AuditConstraint, AuditResource
 from pacemaker._cts.environment import EnvFactory
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.patterns import PatternSelector
 from pacemaker._cts.remote import RemoteFactory
 from pacemaker._cts.timer import Timer
 from pacemaker._cts.watcher import LogWatcher
 
 # Disable various pylint warnings that occur in so many places throughout this
 # file it's easiest to just take care of them globally.  This does introduce the
 # possibility that we'll miss some other cause of the same warning, but we'll
 # just have to be careful.
 
 # pylint doesn't understand that self._rsh is callable.
 # pylint: disable=not-callable
 
 
 class CTSTest:
     """ The base class for all cluster tests.  This implements a basic set of
         properties and behaviors like setup, tear down, time keeping, and
         statistics tracking.  It is up to specific tests to implement their own
         specialized behavior on top of this class.
     """
 
     def __init__(self, cm):
         """ Create a new CTSTest instance
 
             Arguments:
 
             cm -- A ClusterManager instance
         """
 
         # pylint: disable=invalid-name
 
         self.audits = []
         self.name = None
         self.templates = PatternSelector(cm["Name"])
 
         self.stats = { "auditfail": 0,
                       "calls": 0,
                       "failure": 0,
                       "skipped": 0,
                       "success": 0 }
 
         self._cm = cm
         self._env = EnvFactory().getInstance()
         self._r_o2cb = None
         self._r_ocfs2 = []
         self._rsh = RemoteFactory().getInstance()
         self._logger = LogFactory()
         self._timers = {}
 
         self.benchmark = True  # Whether to include this test in benchmark runs
         self.failed = False
-        self.is_container = False
         self.is_experimental = False
         self.is_loop = False
         self.is_unsafe = False
         self.is_valgrind = False
         self.passed = True
 
     def log(self, args):
         """ Log a message """
 
         self._logger.log(args)
 
     def debug(self, args):
         """ Log a debug message """
 
         self._logger.debug(args)
 
     def get_timer(self, key="test"):
         """ Get the start time of the given timer """
 
         try:
             return self._timers[key].start_time
         except KeyError:
             return 0
 
     def set_timer(self, key="test"):
         """ Set the start time of the given timer to now, and return
             that time
         """
 
         if key not in self._timers:
             self._timers[key] = Timer(self._logger, self.name, key)
 
         self._timers[key].start()
         return self._timers[key].start_time
 
     def log_timer(self, key="test"):
         """ Log the elapsed time of the given timer """
 
         if key not in self._timers:
             return
 
         elapsed = self._timers[key].elapsed
         self.debug("%s:%s runtime: %.2f" % (self.name, key, elapsed))
         del self._timers[key]
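         # Taken together, a test can time one phase of its run like this
         # (sketch; "setup" is an arbitrary timer key):
         #
         #   self.set_timer("setup")
         #   ... perform the setup work ...
         #   self.log_timer("setup")   # logs "Name:setup runtime: 12.34"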
 
     def incr(self, name):
         """ Increment the given stats key """
 
         if name not in self.stats:
             self.stats[name] = 0
 
         self.stats[name] += 1
 
         # Reset the test passed boolean
         if name == "calls":
             self.passed = True
 
     def failure(self, reason="none"):
         """ Increment the failure count, with an optional failure reason """
 
         self.passed = False
         self.incr("failure")
         self._logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
 
         return False
 
     def success(self):
         """ Increment the success count """
 
         self.incr("success")
         return True
 
     def skipped(self):
         """ Increment the skipped count """
 
         self.incr("skipped")
         return True
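         # Note that incr("calls") (see above) resets self.passed at the start
         # of each run, after which failure() and success() update both the
         # counters and the boolean handed back to the caller.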
 
     def __call__(self, node):
         """ Perform this test """
 
         raise NotImplementedError
 
     def audit(self):
         """ Perform all the relevant audits (see ClusterAudit), returning
             whether or not they all passed.
         """
 
         passed = True
 
         for audit in self.audits:
             if not audit():
                 self._logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name))
                 self.incr("auditfail")
                 passed = False
 
         return passed
 
     def setup(self, node):
         """ Setup this test """
 
         # node is used in subclasses
         # pylint: disable=unused-argument
 
         return self.success()
 
     def teardown(self, node):
         """ Tear down this test """
 
         # node is used in subclasses
         # pylint: disable=unused-argument
 
         return self.success()
 
     def create_watch(self, patterns, timeout, name=None):
         """ Create a new LogWatcher object with the given patterns, timeout,
             and optional name.  This object can be used to search log files
             for matching patterns during this test's run.
         """
         if not name:
             name = self.name
 
         return LogWatcher(self._env["LogFileName"], patterns, self._env["nodes"], self._env["LogWatcher"], name, timeout)
 
     def local_badnews(self, prefix, watch, local_ignore=None):
         """ Use the given watch object to search through log files for messages
             starting with the given prefix.  If no prefix is given, use
             "LocalBadNews:" by default.  The optional local_ignore list should
             be a list of regexes that, if found in a line, will cause that line
             to be ignored.
 
             Return the number of matches found.
         """
         errcount = 0
         if not prefix:
             prefix = "LocalBadNews:"
 
         ignorelist = [" CTS: ", prefix]
 
         if local_ignore:
             ignorelist += local_ignore
 
         while errcount < 100:
             match = watch.look(0)
             if match:
                 add_err = True
 
                 for ignore in ignorelist:
                     if add_err and re.search(ignore, match):
                         add_err = False
 
                 if add_err:
                     self._logger.log("%s %s" % (prefix, match))
                     errcount += 1
             else:
                 break
         else:
             self._logger.log("Too many errors!")
 
         watch.end()
         return errcount
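         # Sketch of the intended flow (pattern and prefix are illustrative;
         # see watcher.py for the full LogWatcher interface):
         #
         #   watch = self.create_watch([r"pacemakerd.*error"], 120)
         #   ... trigger the disruptive action under test ...
         #   bad = self.local_badnews("BadNews:", watch)   # also ends the watch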
 
     def is_applicable(self):
         """ Return True if this test is applicable in the current test configuration.
             This method must be implemented by all subclasses.
         """
 
         if self.is_loop and not self._env["loop-tests"]:
             return False
 
         if self.is_unsafe and not self._env["unsafe-tests"]:
             return False
 
         if self.is_valgrind and not self._env["valgrind-tests"]:
             return False
 
         if self.is_experimental and not self._env["experimental-tests"]:
             return False
 
-        if self.is_container and not self._env["container-tests"]:
-            return False
-
         if self._env["benchmark"] and not self.benchmark:
             return False
 
         return True
 
     def _find_ocfs2_resources(self, node):
         """ Find any OCFS2 filesystems mounted on the given cluster node,
             populating the internal self._r_ocfs2 list with them and returning
             the number of OCFS2 filesystems.
          """
 
         self._r_o2cb = None
         self._r_ocfs2 = []
 
         (_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self._cm, line)
 
                 if r.rtype == "o2cb" and r.parent != "NA":
                     self.debug("Found o2cb: %s" % self._r_o2cb)
                     self._r_o2cb = r.parent
 
             if re.search("^Constraint", line):
                 c = AuditConstraint(self._cm, line)
 
                 if c.type == "rsc_colocation" and c.target == self._r_o2cb:
                     self._r_ocfs2.append(c.rsc)
 
         self.debug("Found ocfs2 filesystems: %s" % self._r_ocfs2)
         return len(self._r_ocfs2)
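         # The loop above relies on "crm_resource -c" emitting lines that begin
         # with "Resource" or "Constraint"; AuditResource and AuditConstraint
         # (from audits.py) do the actual field parsing.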
 
     def can_run_now(self, node):
         """ Return True if we can meaningfully run right now """
 
         # node is used in subclasses
         # pylint: disable=unused-argument
 
         return True
 
     @property
     def errors_to_ignore(self):
         """ Return list of errors which should be ignored """
 
         return []
diff --git a/python/pacemaker/_cts/tests/remotebasic.py b/python/pacemaker/_cts/tests/remotebasic.py
new file mode 100644
index 0000000000..2f25aaff90
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotebasic.py
@@ -0,0 +1,39 @@
+""" Start and stop a remote node """
+
+__all__ = ["RemoteBasic"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+
+class RemoteBasic(RemoteDriver):
+    """ A concrete test that starts and stops a remote node """
+
+    def __init__(self, cm):
+        """ Create a new RemoteBasic instance
+
+            Arguments:
+
+            cm -- A ClusterManager instance
+        """
+
+        RemoteDriver.__init__(self, cm)
+
+        self.name = "RemoteBasic"
+
+    def __call__(self, node):
+        """ Perform this test """
+
+        if not self.start_new_test(node):
+            return self.failure(self.fail_string)
+
+        self.test_attributes(node)
+        self.cleanup_metal(node)
+
+        self.debug("Waiting for the cluster to recover")
+        self._cm.cluster_stable()
+        if self.failed:
+            return self.failure(self.fail_string)
+
+        return self.success()
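+
+# start_new_test, test_attributes, cleanup_metal, and fail_string all come
+# from RemoteDriver; this subclass merely sequences them into the simplest
+# scenario (roughly: bring up the remote node, exercise node attributes on
+# it, then tear everything back down).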
diff --git a/python/pacemaker/_cts/tests/remotemigrate.py b/python/pacemaker/_cts/tests/remotemigrate.py
new file mode 100644
index 0000000000..e22e98f039
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotemigrate.py
@@ -0,0 +1,63 @@
+""" Move a connection resource from one node to another """
+
+__all__ = ["RemoteMigrate"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally.  This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class RemoteMigrate(RemoteDriver):
+    """ A concrete test that moves a connection resource from one node to another """
+
+    def __init__(self, cm):
+        """ Create a new RemoteMigrate instance
+
+            Arguments:
+
+            cm -- A ClusterManager instance
+        """
+
+        RemoteDriver.__init__(self, cm)
+
+        self.name = "RemoteMigrate"
+
+    def __call__(self, node):
+        """ Perform this test """
+
+        # This code is very similar to __call__ in remotestonithd.py, but I don't think
+        # it's worth turning into a library function nor making one a subclass of the
+        # other.  I think that's more confusing than leaving the duplication.
+        # pylint: disable=duplicate-code
+
+        if not self.start_new_test(node):
+            return self.failure(self.fail_string)
+
+        self.migrate_connection(node)
+        self.cleanup_metal(node)
+
+        self.debug("Waiting for the cluster to recover")
+        self._cm.cluster_stable()
+        if self.failed:
+            return self.failure(self.fail_string)
+
+        return self.success()
+
+    def is_applicable(self):
+        """ Return True if this test is applicable in the current test configuration. """
+
+        if not RemoteDriver.is_applicable(self):
+            return False
+
+        # This test requires at least three nodes: one to convert to a
+        # remote node, one to host the connection originally, and one
+        # to migrate the connection to.
+        return len(self._env["nodes"]) >= 3
diff --git a/python/pacemaker/_cts/tests/remoterscfailure.py b/python/pacemaker/_cts/tests/remoterscfailure.py
new file mode 100644
index 0000000000..33c1d3739a
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remoterscfailure.py
@@ -0,0 +1,71 @@
+""" Cause the Pacemaker Remote connection resource to fail """
+
+__all__ = ["RemoteRscFailure"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally.  This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class RemoteRscFailure(RemoteDriver):
+    """ A concrete test that causes the Pacemaker Remote connection resource
+        to fail
+    """
+
+    def __init__(self, cm):
+        """ Create a new RemoteRscFailure instance
+
+            Arguments:
+
+            cm -- A ClusterManager instance
+        """
+
+        RemoteDriver.__init__(self, cm)
+        self.name = "RemoteRscFailure"
+
+    def __call__(self, node):
+        """ Perform this test """
+
+        if not self.start_new_test(node):
+            return self.failure(self.fail_string)
+
+        # This is an important step. We are migrating the connection
+        # before failing the resource. This verifies that the migration
+        # has properly maintained control over the remote-node.
+        self.migrate_connection(node)
+
+        self.fail_rsc(node)
+        self.cleanup_metal(node)
+
+        self.debug("Waiting for the cluster to recover")
+        self._cm.cluster_stable()
+        if self.failed:
+            return self.failure(self.fail_string)
+
+        return self.success()
+
+    @property
+    def errors_to_ignore(self):
+        """ Return list of errors which should be ignored """
+
+        return [ r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)",
+                 r"Dummy.*: No process state file found" ] + super().errors_to_ignore
+
+    def is_applicable(self):
+        """ Return True if this test is applicable in the current test configuration. """
+
+        if not RemoteDriver.is_applicable(self):
+            return False
+
+        # This test requires at least three nodes: one to convert to a
+        # remote node, one to host the connection originally, and one
+        # to migrate the connection to.
+        return len(self._env["nodes"]) >= 3
diff --git a/python/pacemaker/_cts/tests/remotestonithd.py b/python/pacemaker/_cts/tests/remotestonithd.py
new file mode 100644
index 0000000000..fd5144e2f6
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotestonithd.py
@@ -0,0 +1,60 @@
+""" Fail the connection resource and fence the remote node """
+
+__all__ = ["RemoteStonithd"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+
+class RemoteStonithd(RemoteDriver):
+    """ A concrete test that fails the connection resource and fences the
+        remote node
+    """
+
+    def __init__(self, cm):
+        """ Create a new RemoteStonithd instance
+
+            Arguments:
+
+            cm -- A ClusterManager instance
+        """
+
+        RemoteDriver.__init__(self, cm)
+
+        self.name = "RemoteStonithd"
+
+    def __call__(self, node):
+        """ Perform this test """
+
+        if not self.start_new_test(node):
+            return self.failure(self.fail_string)
+
+        self.fail_connection(node)
+        self.cleanup_metal(node)
+
+        self.debug("Waiting for the cluster to recover")
+        self._cm.cluster_stable()
+        if self.failed:
+            return self.failure(self.fail_string)
+
+        return self.success()
+
+    def is_applicable(self):
+        """ Return True if this test is applicable in the current test configuration. """
+
+        if not RemoteDriver.is_applicable(self):
+            return False
+
+        return self._env.get("DoFencing", True)
+
+    @property
+    def errors_to_ignore(self):
+        """ Return list of errors which should be ignored """
+
+        return [ r"Lost connection to Pacemaker Remote node",
+                 r"Software caused connection abort",
+                 r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
+                 r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
+                 r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
+                 r"error: Result of monitor operation for .* on remote-.*: Internal communication failure" ] + super().errors_to_ignore