diff --git a/.gitignore b/.gitignore index fb35b3da96..95e1283691 100644 --- a/.gitignore +++ b/.gitignore @@ -1,353 +1,353 @@ # # Copyright 2011-2023 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # # Common conventions for files that should be ignored *~ *.bz2 *.diff *.orig *.patch *.rej *.sed *.swp *.tar.gz *.tgz \#* .\#* logs # libtool artifacts *.la *.lo .libs libltdl libtool libtool.m4 ltdl.m4 /m4/argz.m4 /m4/ltargz.m4 /m4/ltoptions.m4 /m4/ltsugar.m4 /m4/ltversion.m4 /m4/lt~obsolete.m4 # autotools artifacts .deps .dirstamp Makefile Makefile.in aclocal.m4 autoconf autoheader autom4te.cache/ automake /confdefs.h config.log config.status configure /conftest* # gettext artifacts /ABOUT-NLS /m4/codeset.m4 /m4/fcntl-o.m4 /m4/gettext.m4 /m4/glibc2.m4 /m4/glibc21.m4 /m4/iconv.m4 /m4/intdiv0.m4 /m4/intl.m4 /m4/intldir.m4 /m4/intlmacosx.m4 /m4/intmax.m4 /m4/inttypes-pri.m4 /m4/inttypes_h.m4 /m4/lcmessage.m4 /m4/lib-ld.m4 /m4/lib-link.m4 /m4/lib-prefix.m4 /m4/lock.m4 /m4/longlong.m4 /m4/nls.m4 /m4/po.m4 /m4/printf-posix.m4 /m4/progtest.m4 /m4/size_max.m4 /m4/stdint_h.m4 /m4/threadlib.m4 /m4/uintmax_t.m4 /m4/visibility.m4 /m4/wchar_t.m4 /m4/wint_t.m4 /m4/xsize.m4 /po/*.gmo /po/*.header /po/*.pot /po/*.sin /po/Makefile.in.in /po/Makevars.template /po/POTFILES /po/Rules-quot /po/stamp-po # configure targets /agents/ocf/ClusterMon /agents/ocf/Dummy /agents/ocf/HealthCPU /agents/ocf/HealthIOWait /agents/ocf/HealthSMART /agents/ocf/Stateful /agents/ocf/SysInfo /agents/ocf/attribute /agents/ocf/controld /agents/ocf/ifspeed /agents/ocf/ping /agents/ocf/remote /agents/stonith/fence_legacy /agents/stonith/fence_watchdog /cts/benchmark/clubench /cts/cluster_test /cts/cts /cts/cts-attrd /cts/cts-cli /cts/cts-exec /cts/cts-fencing /cts/cts-lab /cts/cts-regression /cts/cts-scheduler /cts/cts-schemas /cts/lab/CTS.py /cts/support/LSBDummy /cts/support/cts-support /cts/support/fence_dummy /cts/support/pacemaker-cts-dummyd /cts/support/pacemaker-cts-dummyd@.service /daemons/execd/pacemaker_remote /daemons/execd/pacemaker_remote.service /daemons/fenced/fence_legacy /daemons/fenced/fence_watchdog -/daemons/pacemakerd/pacemaker.combined.upstart /daemons/pacemakerd/pacemaker.service -/daemons/pacemakerd/pacemaker.upstart /doc/Doxyfile /etc/init.d/pacemaker /etc/logrotate.d/pacemaker /etc/sysconfig/pacemaker /include/config.h /include/config.h.in /include/crm_config.h /maint/bumplibs /python/pacemaker/buildoptions.py /python/setup.py /tools/cluster-clean /tools/cluster-helper /tools/cluster-init /tools/cibsecret /tools/crm_error /tools/crm_failcount /tools/crm_master /tools/crm_mon.service -/tools/crm_mon.upstart /tools/crm_report /tools/crm_rule /tools/crm_standby /tools/pcmk_simtimes /tools/report.collector /tools/report.common /xml/rng-helper # Compiled targets and intermediary files *.o *.pc *.pyc /daemons/attrd/pacemaker-attrd /daemons/based/pacemaker-based /daemons/controld/pacemaker-controld /daemons/execd/cts-exec-helper /daemons/execd/pacemaker-execd /daemons/execd/pacemaker-remoted /daemons/fenced/cts-fence-helper /daemons/fenced/pacemaker-fenced /daemons/pacemakerd/pacemakerd /daemons/schedulerd/pacemaker-schedulerd /devel/scratch /lib/gnu/stdalign.h /tools/attrd_updater /tools/cibadmin /tools/crmadmin /tools/crm_attribute /tools/crm_diff /tools/crm_mon /tools/crm_node /tools/crm_resource /tools/crm_shadow /tools/crm_simulate 
/tools/crm_ticket /tools/crm_verify /tools/iso8601 /tools/stonith_admin # Generated XML schema files /xml/crm_mon.rng /xml/pacemaker*.rng /xml/versions.rng /xml/api/api-result*.rng # Working directories for make dist and make export /pacemaker-[a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9] # Documentation build targets and intermediary files *.7 *.7.xml *.7.html *.8 *.8.xml *.8.html GPATH GRTAGS GTAGS TAGS /daemons/fenced/pacemaker-fenced.xml /daemons/schedulerd/pacemaker-schedulerd.xml /doc/.ABI-build /doc/HTML /doc/abi_dumps /doc/abi-check /doc/api/ /doc/compat_reports /doc/crm_fencing.html /doc/sphinx/*/_build /doc/sphinx/*/conf.py /doc/sphinx/*/generated /doc/sphinx/build-2.1.txt /doc/sphinx/shared/images/*.png # Test artifacts (from unit tests, regression tests, static analysis, etc.) *.coverity *.gcda *.gcno coverity-* pacemaker_*.info /coverage /cppcheck.out /cts/scheduler/bug-rh-1097457.log /cts/scheduler/bug-rh-1097457.trs /cts/scheduler/shadow.* /cts/schemas/test-*/ref/*.up* /cts/schemas/test-*/ref.err/*.up.err* /cts/test-suite.log /lib/*/fuzzers/*/*_fuzzer /lib/*/tests/*/*.log /lib/*/tests/*/*_test /lib/*/tests/*/*.trs /lib/common/tests/schemas/schemas /test/_test_file.c # Packaging artifacts *.rpm /pacemaker.spec /rpm/[A-LN-Z]* /rpm/build.counter /rpm/mock # Project maintainer artifacts /maint/gnulib /maint/mocked/based /maint/testcc_helper.cc /maint/testcc_*_h # Formerly built files (helps when jumping back and forth in checkout) /.ABI-build /Doxyfile /HTML /abi_dumps /abi-check /agents/ocf/o2cb /build.counter /compat_reports /compile /cts/.regression.failed.diff /attrd /cib /config.guess /config.sub /coverage.sh /crmd /cts/CTS.py /cts/CTSlab.py /cts/CTSvars.py /cts/HBDummy /cts/LSBDummy /cts/OCFIPraTest.py /cts/cts-coverage /cts/cts-log-watcher /cts/cts-support /cts/fence_dummy /cts/lab/CTSlab.py /cts/lab/CTSvars.py /cts/lab/OCFIPraTest.py /cts/lab/cluster_test /cts/lab/cts /cts/lab/cts-log-watcher /cts/lxc_autogen.sh /cts/pacemaker-cts-dummyd /cts/pacemaker-cts-dummyd@.service /daemons/based/cibmon /daemons/fenced/fence_legacy /daemons/fenced/fence_watchdog /daemons/pacemakerd/pacemaker +/daemons/pacemakerd/pacemaker.combined.upstart +/daemons/pacemakerd/pacemaker.upstart /depcomp /doc/*.build /doc/*/en-US/Ap-*.xml /doc/*/en-US/Ch-*.xml /doc/*/publican.cfg /doc/*/publish /doc/*/tmp/** /doc/Clusters_from_Scratch.txt /doc/Pacemaker_Explained.txt /doc/acls.html /doc/publican-catalog* /doc/shared/en-US/*.xml /doc/shared/en-US/images/pcmk-*.png /doc/shared/en-US/images/Policy-Engine-*.png /extra/*/* /fencing /include/stamp-* /install-sh /lib/common/md5.c /lib/common/tests/flags/pcmk__clear_flags_as /lib/common/tests/flags/pcmk__set_flags_as /lib/common/tests/flags/pcmk_all_flags_set /lib/common/tests/flags/pcmk_any_flags_set /lib/common/tests/operations/parse_op_key /lib/common/tests/strings/pcmk__btoa /lib/common/tests/strings/pcmk__parse_ll_range /lib/common/tests/strings/pcmk__scan_double /lib/common/tests/strings/pcmk__str_any_of /lib/common/tests/strings/pcmk__strcmp /lib/common/tests/strings/pcmk__char_in_any_str /lib/common/tests/utils/pcmk_str_is_infinity /lib/common/tests/utils/pcmk_str_is_minus_infinity /lib/gnu/libgnu.a /lib/pengine/tests/rules/ /lrmd /ltmain.sh /mcp /missing /mock /pacemaker-*.spec /pengine /py-compile /scratch /tools/cluster-init +/tools/crm_mon.upstart /test-driver /xml/assets /xml/crm.dtd /xml/version-diff.sh ylwrap diff --git a/INSTALL.md b/INSTALL.md index 7ac85c9090..26524d1aee 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,84 +1,84 @@ 
# How to Install Pacemaker

## Build Dependencies

| Version | Fedora-based | Suse-based | Debian-based |
|:---------------:|:------------------:|:------------------:|:--------------:|
| 1.13 or later | automake | automake | automake |
| 2.64 or later | autoconf | autoconf | autoconf |
| | libtool | libtool | libtool |
| | libtool-ltdl-devel | | libltdl-dev |
| | libuuid-devel | libuuid-devel | uuid-dev |
| 0.28 or later | pkgconfig | pkgconfig | pkg-config |
| 2.42.0 or later | glib2-devel | glib2-devel | libglib2.0-dev |
| 2.9.2 or later | libxml2-devel | libxml2-devel | libxml2-dev |
| | libxslt-devel | libxslt-devel | libxslt-dev |
| | bzip2-devel | libbz2-devel | libbz2-dev |
| 1.0.1 or later | libqb-devel | libqb-devel | libqb-dev |
| 3.6 or later | python3 | python3 | python3 |
| 0.18 or later | gettext-devel | gettext-tools | gettext |
| 0.18 or later | | | autopoint |
| 3.1.7 or later | gnutls-devel | libgnutls-devel | libgnutls-dev |

Also:
* make must be GNU (or compatible) (setting MAKE=gmake might also work but is untested)
* GNU (or compatible) getopt must be somewhere on the PATH

### Cluster Stack Dependencies

*Only corosync is currently supported*

| Version | Fedora-based | Suse-based | Debian-based |
|:---------------:|:------------------:|:------------------:|:--------------:|
| 2.0.0 or later | corosynclib | libcorosync | corosync |
| 2.0.0 or later | corosynclib-devel | libcorosync-devel | |
| | | | libcfg-dev |
| | | | libcpg-dev |
| | | | libcmap-dev |
| | | | libquorum-dev |

### Optional Build Dependencies

| Feature Enabled | Version | Fedora-based | Suse-based | Debian-based |
|:-----------------------------------------------:|:--------------:|:-----------------------:|:-----------------------:|:-----------------------:|
| encrypted remote CIB admin | | pam-devel | pam-devel | libpam0g-dev |
| interactive crm_mon | | ncurses-devel | ncurses-devel | ncurses-dev |
| systemd support | | systemd-devel | systemd-devel | libsystemd-dev |
-| systemd/upstart resource support | 1.5.12 or later| dbus-devel | dbus-devel | libdbus-1-dev |
+| systemd resource support | 1.5.12 or later| dbus-devel | dbus-devel | libdbus-1-dev |
| Linux-HA style fencing agents | | cluster-glue-libs-devel | libglue-devel | cluster-glue-dev |
| documentation | | asciidoc or asciidoctor | asciidoc or asciidoctor | asciidoc or asciidoctor |
| documentation | | help2man | help2man | help2man |
| documentation | | inkscape | inkscape | inkscape |
| documentation | | docbook-style-xsl | docbook-xsl-stylesheets | docbook-xsl |
| documentation | | python3-sphinx | python3-sphinx | python3-sphinx |
| documentation (PDF) | | latexmk texlive texlive-capt-of texlive-collection-xetex texlive-fncychap texlive-framed texlive-multirow texlive-needspace texlive-tabulary texlive-titlesec texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xetex | texlive texlive-latex | texlive texlive-latex-extra |
| annotated source code as HTML via "make global" | | global | global | global |
| RPM packages via "make rpm" | 4.14 or later | rpm | rpm | (n/a) |
| unit tests | 1.1.0 or later | libcmocka-devel | libcmocka-devel | libcmocka-dev |

## Optional Testing Dependencies

* procps and psmisc (if running cts-exec, cts-fencing, or CTS lab)
* valgrind (if running valgrind tests in cts-cli, cts-scheduler, or CTS lab)
* python3-dateutil and python3-systemd (if running CTS lab on cluster nodes running systemd)
* nmap (if not specifying an IP address base)
* oprofile (if running CTS lab profiling tests)
* dlm (to log DLM
debugging info after CTS lab tests) * xmllint (to validate tool output in cts-cli) ## Simple Install $ make && sudo make install If GNU make is not your default make, use "gmake" instead. ## Detailed Install First, browse the build options that are available: $ ./autogen.sh $ ./configure --help Re-run ./configure with any options you want, then proceed with the simple method. diff --git a/configure.ac b/configure.ac index 7b1007e346..daebc9292a 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2226 +1,2176 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2024 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. dnl ============================================== dnl Bootstrap autotools dnl ============================================== # Require a minimum version of autoconf itself AC_PREREQ(2.64) dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08). dnl Once we can require that version, we can simplify this, and no longer dnl need ACLOCAL_AMFLAGS in Makefile.am. m4_ifdef([AC_CONFIG_MACRO_DIRS], [AC_CONFIG_MACRO_DIRS([m4])], [AC_CONFIG_MACRO_DIR([m4])]) m4_include([m4/version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) LT_CONFIG_LTDL_DIR([libltdl]) AC_CONFIG_AUX_DIR([libltdl/config]) dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go dnl dnl include/config.h dnl - Internal API dnl - Contains all defines dnl - include/config.h.in is generated automatically by autoheader dnl - Not to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl include/crm_config.h dnl - External API dnl - Contains a subset of defines dnl - include/crm_config.h.in is manually edited to select the subset dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.13: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects]) dnl Require minimum version of pkg-config PKG_PROG_PKG_CONFIG(0.28) AS_IF([test x"${PKG_CONFIG}" != x""], [], [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.28 or later)])]) PKG_INSTALLDIR PKG_NOARCH_INSTALLDIR dnl ============================================== dnl Compiler checks and helpers dnl ============================================== dnl A particular compiler can be forced by setting the CC environment variable AC_PROG_CC dnl C++ is needed only to run maintainer utilities, not to build AC_PROG_CXX dnl Use at least C99 if possible (automatic for autoconf >= 2.70) m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC]) # cc_supports_flag # Return success if the C compiler supports the given flag cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING([whether $CC supports $@]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } # cc_temp_flags # Use the given flags for subsequent C compilation. These can be reverted to # what was used previously with cc_restore_flags. This allows certain tests to # use specific flags without affecting anything else. 
cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } # cc_restore_flags # Restore C compiler flags to what they were before the last cc_temp_flags # call. cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } # Check for fatal warning support AS_IF([test $enable_fatal_warnings -ne $DISABLED dnl && test x"$GCC" = x"yes" && cc_supports_flag -Werror], [WERROR="-Werror"], [ WERROR="" AS_CASE([$enable_fatal_warnings], [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])], [$OPTIONAL], [enable_fatal_warnings=$DISABLED]) ]) dnl ============================================== dnl Linker checks dnl ============================================== # Check whether linker supports --enable-new-dtags to use RUNPATH instead of # RPATH. It is necessary to do this before libtool does linker detection. # See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])]) AC_SUBST([AM_LDFLAGS]) saved_LDFLAGS="$LDFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) dnl ============================================== dnl Define configure options dnl ============================================== # yes_no_try # Map a yes/no/try user selection to $REQUIRED for yes, $DISABLED for no, and # $OPTIONAL for try. DISABLED=0 REQUIRED=1 OPTIONAL=2 yes_no_try() { local value AS_IF([test x"$1" = x""], [value="$2"], [value="$1"]) AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"], [0|no|false|disable], [return $DISABLED], [1|yes|true|enable], [return $REQUIRED], [try|check], [return $OPTIONAL] ) AC_MSG_ERROR([Invalid option value "$value"]) } # # Fix the defaults of certain built-in variables so they can be used in the # defaults for our custom arguments # AC_MSG_NOTICE([Sanitizing prefix: ${prefix}]) AS_IF([test x"$prefix" = x"NONE"], [ prefix=/usr dnl Fix default variables - "prefix" variable if not specified AS_IF([test x"$localstatedir" = x"\${prefix}/var"], [localstatedir="/var"]) AS_IF([test x"$sysconfdir" = x"\${prefix}/etc"], [sysconfdir="/etc"]) ]) AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}]) AS_CASE([$exec_prefix], [prefix|NONE], [exec_prefix=$prefix]) AC_MSG_NOTICE([Sanitizing libdir: ${libdir}]) AS_CASE([$libdir], [prefix|NONE], [ AC_MSG_CHECKING([which lib directory to use]) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" AS_IF([test -d ${trydir}], [ libdir=${trydir} break ]) done AC_MSG_RESULT([$libdir]) ]) # Start a list of optional features this build supports PCMK_FEATURES="" dnl This section should include only the definition of configure script dnl options and determining their values. Processing should be done later when dnl possible, other than what's needed to determine values and defaults. dnl Per the autoconf docs, --enable-*/--disable-* options should control dnl features inherent to Pacemaker, while --with-*/--without-* options should dnl control the use of external software. However, --enable-*/--disable-* may dnl implicitly require additional external dependencies, and dnl --with-*/--without-* may implicitly enable or disable features, so the dnl line is blurry. dnl dnl We also use --with-* options for custom file, directory, and path dnl locations, since autoconf does not provide an option type for those. 
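The yes_no_try helper defined above is what every --enable-*/--disable-* and --with-*/--without-* option below is funneled through: it maps a yes/no/try selection onto the numeric DISABLED/REQUIRED/OPTIONAL constants that the rest of the script tests against. A minimal standalone sketch of that behaviour, with the AS_IF/AS_CASE autoconf macros rewritten as plain POSIX shell so it can be run outside configure, looks like this:

    # Same constants and mapping as configure.ac, but plain shell instead of m4 macros
    DISABLED=0
    REQUIRED=1
    OPTIONAL=2

    yes_no_try() {
        # $1 is the user-supplied value (possibly empty), $2 is the default
        value="$1"
        test -z "$value" && value="$2"
        case "$(echo "$value" | tr '[A-Z]' '[a-z]')" in
            0|no|false|disable) return $DISABLED ;;
            1|yes|true|enable)  return $REQUIRED ;;
            try|check)          return $OPTIONAL ;;
        esac
        echo "Invalid option value \"$value\"" >&2
        exit 1
    }

    # --enable-systemd not given: the default "try" maps to OPTIONAL (2)
    yes_no_try "$enable_systemd" "try"; echo "enable_systemd=$?"
    # an explicit "no" maps to DISABLED (0)
    yes_no_try "no" "try"; echo "explicit no=$?"

Returning the result through $? is why each option block below immediately captures it with a pattern like enable_quiet=$?.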
dnl --enable-* options: build process AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])] ) yes_no_try "$enable_quiet" "no" enable_quiet=$? AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@try@:>@])], ) yes_no_try "$enable_fatal_warnings" "try" enable_fatal_warnings=$? AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])] ) yes_no_try "$enable_hardening" "try" enable_hardening=$? dnl --enable-* options: features AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])] ) yes_no_try "$enable_systemd" "try" enable_systemd=$? -AC_ARG_ENABLE([upstart], - [AS_HELP_STRING([--enable-upstart], - [enable support for managing resources via Upstart (deprecated) @<:@try@:>@])] -) -yes_no_try "$enable_upstart" "try" -enable_upstart=$? - dnl --enable-* options: features inherent to Pacemaker # AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults # to enabled. We override the definition of AM_NLS to flip the default and mark # it as experimental in the help text. AC_DEFUN([AM_NLS], [AC_MSG_CHECKING([whether NLS is requested]) AC_ARG_ENABLE([nls], [AS_HELP_STRING([--enable-nls], [use Native Language Support (experimental)])], USE_NLS=$enableval, USE_NLS=no) AC_MSG_RESULT([$USE_NLS]) AC_SUBST([USE_NLS])] ) AM_GNU_GETTEXT([external]) AM_GNU_GETTEXT_VERSION([0.18]) dnl --with-* options: external software support, and custom locations dnl This argument is defined via an M4 macro so default can be a variable AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACEMAKER_VERSION="$withval" ], [ PACEMAKER_VERSION="$PACKAGE_VERSION" ])] ) VERSION_ARG(VERSION_NUMBER) CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) AS_IF([test x"${CRM_DAEMON_USER}" = x""], [CRM_DAEMON_USER="hacluster"]) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) AS_IF([test x"${CRM_DAEMON_GROUP}" = x""], [CRM_DAEMON_GROUP="haclient"]) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], m4_normalize([ address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))], [ BUG_URL="$withval" ] ) AS_IF([test x"${BUG_URL}" = x""], [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"]) dnl --with-* options: features AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets @<:@no@:>@])] ) yes_no_try "$with_cibsecrets" "no" with_cibsecrets=$? 
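Taken together, the declarations above translate into ordinary ./configure switches. The invocation below is only an illustration: the option names come straight from the AC_ARG_ENABLE/AC_ARG_WITH calls above, while the values shown are the documented defaults or plausible example choices, not a recommendation.

    # Hypothetical build using a few of the options declared above
    ./autogen.sh
    ./configure \
        --enable-systemd \
        --enable-fatal-warnings \
        --enable-nls \
        --with-daemon-user=hacluster \
        --with-daemon-group=haclient \
        --with-cibsecrets \
        --with-bug-url=https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker

Each value is either run through yes_no_try or stored directly here, then acted on in the processing sections further down.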
PCMK__GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK__GNUTLS_PRIORITIES="$withval" ] ) AC_ARG_WITH([concurrent-fencing-default], [AS_HELP_STRING([--with-concurrent-fencing-default], m4_normalize([ default value for concurrent-fencing cluster option (deprecated) @<:@true@:>@]))], ) AS_CASE([$with_concurrent_fencing_default], [""], [with_concurrent_fencing_default="true"], [true], [], [false], [PCMK_FEATURES="$PCMK_FEATURES concurrent-fencing-default-false"], [AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])] ) AC_ARG_WITH([sbd-sync-default], [AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([ default value used by sbd if SBD_SYNC_RESOURCE_STARTUP environment variable is not set @<:@false@:>@]))], ) AS_CASE([$with_sbd_sync_default], [""], [with_sbd_sync_default=false], [false], [], [true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"], [AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])] ) AC_ARG_WITH([resource-stickiness-default], [AS_HELP_STRING([--with-resource-stickiness-default], [If positive, value to add to new CIBs as explicit resource default for resource-stickiness @<:@0@:>@])], ) errmsg="Invalid value \"$with_resource_stickiness_default\" for --with-resource-stickiness-default" AS_CASE([$with_resource_stickiness_default], [0|""], [with_resource_stickiness_default="0"], [*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])], [PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"] ) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer @<:@try@:>@])] ) yes_no_try "$with_corosync" "try" with_corosync=$? dnl Get default from Corosync if possible PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir], [PCMK__COROSYNC_CONF="$PCMK__COROSYNC_CONF/corosync.conf"], [PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"]) AC_ARG_WITH([corosync-conf], [AS_HELP_STRING([--with-corosync-conf], m4_normalize([ location of Corosync configuration file @<:@value from Corosync package if available otherwise SYSCONFDIR/corosync/corosync.conf@:>@]))], [ PCMK__COROSYNC_CONF="$withval" ] ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios resources (deprecated)])] ) yes_no_try "$with_nagios" "try" with_nagios=$? 
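Of the defaults above, --with-resource-stickiness-default is the only one validated as a number; the AS_CASE pattern *[[!0-9]]* (doubled brackets because of m4 quoting) rejects any value containing a non-digit. A plain-shell sketch of that validation, runnable outside configure with the value passed as $1:

    # $1 plays the role of the --with-resource-stickiness-default value
    with_resource_stickiness_default="$1"
    PCMK_FEATURES=""

    case "$with_resource_stickiness_default" in
        0|"")     with_resource_stickiness_default="0" ;;
        *[!0-9]*) echo "Invalid value \"$with_resource_stickiness_default\"" \
                       "for --with-resource-stickiness-default" >&2
                  exit 1 ;;
        *)        PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness" ;;
    esac
    echo "default=$with_resource_stickiness_default features=$PCMK_FEATURES"

The concurrent-fencing and SBD sync defaults just above follow the same shape, but with fixed true/false alternatives instead of a numeric check.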
dnl --with-* options: directory locations AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins (deprecated) @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata (deprecated) @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], m4_normalize([ directory for lsb resources (init scripts), or "try" to check for common locations, or "no" to disable] @<:@try@:>@))], [ INITDIR="$withval" ] ) AS_IF([test x"$INITDIR" = x""], [INITDIR="try"]) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users dnl have an older version, they can use our --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) dnl Get default from resource-agents if possible. Otherwise, the default uses dnl /usr/lib rather than libdir because it's determined by the OCF project and dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or dnl such, the OCF agents will be expected in their usual location. However, we dnl do give the user the option to override it. 
PKG_CHECK_VAR([PCMK_OCF_ROOT], [resource-agents], [ocfrootdir], [], [PCMK_OCF_ROOT="/usr/lib/ocf"]) AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], m4_normalize([ OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@value from resource-agents package if available otherwise /usr/lib/ocf@:>@]))], [ PCMK_OCF_ROOT="$withval" ] ) dnl Get default from resource-agents if possible PKG_CHECK_VAR([PCMK__OCF_RA_PATH], [resource-agents], [ocfrapath], [], [PCMK__OCF_RA_PATH="$PCMK_OCF_ROOT/resource.d"]) AC_ARG_WITH([ocfrapath], [AS_HELP_STRING([--with-ocfrapath=DIR], m4_normalize([ OCF resource agent directories (colon-separated) to search @<:@value from resource-agents package if available otherwise OCFDIR/resource.d@:>@]))], [ PCMK__OCF_RA_PATH="$withval" ] ) OCF_RA_INSTALL_DIR="$PCMK_OCF_ROOT/resource.d" AC_ARG_WITH([ocfrainstalldir], [AS_HELP_STRING([--with-ocfrainstalldir=DIR], m4_normalize([ OCF installation directory for Pacemakers resource agents @<:@OCFDIR/resource.d@:>@]))], [ OCF_RA_INSTALL_DIR="$withval" ] ) dnl Get default from fence-agents if available PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"], [PCMK__FENCE_BINDIR="$sbindir"]) AC_ARG_WITH([fence-bindir], [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([ directory for executable fence agents @<:@value from fence-agents package if available otherwise SBINDIR@:>@]))], [ PCMK__FENCE_BINDIR="$withval" ] ) dnl --with-* options: non-production testing AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations, for effective profiling @<:@no@:>@])] ) yes_no_try "$with_profiling" "no" with_profiling=$? AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations, for effective profiling and coverage testing @<:@no@:>@])] ) yes_no_try "$with_coverage" "no" with_coverage=$? AC_DEFINE_UNQUOTED([PCMK__WITH_COVERAGE], [$with_coverage], [Build with code coverage]) AM_CONDITIONAL([BUILD_COVERAGE], [test $with_coverage -ne $DISABLED]) AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])], [ SANITIZERS="$withval" ], [ SANITIZERS="" ]) dnl Environment variable options AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) dnl ============================================== dnl Locate essential tools dnl ============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. 
To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl If PYTHON was specified, ensure it is an absolute path AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) dnl Require a minimum Python version AM_PATH_PYTHON([3.6]) AC_PROG_LN_S AC_PROG_MKDIR_P AC_PATH_PROG([GIT], [git], [false]) dnl Bash is needed for building man pages and running regression tests. dnl We set "BASH_PATH" because "BASH" is already an environment variable. REQUIRE_PROG([BASH_PATH], [bash]) AC_PATH_PROGS(PCMK__VALGRIND_EXEC, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(PCMK__VALGRIND_EXEC, "$PCMK__VALGRIND_EXEC", Valgrind command) dnl ============================================== dnl Package and schema versioning dnl ============================================== # Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case # the user used --with-version. Unfortunately, this can only affect the # substitution variables and later uses in this file, not the config.h # constants, so we have to be careful to use only PACEMAKER_VERSION in C code. PACKAGE_VERSION=$PACEMAKER_VERSION VERSION=$PACEMAKER_VERSION AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION", [Version number of this Pacemaker build]) AC_MSG_CHECKING([build version]) AS_IF([test "$GIT" != "false" && test -d .git], [ BUILD_VERSION=`"$GIT" log --pretty="format:%h" -n 1` AC_MSG_RESULT([$BUILD_VERSION (git hash)]) ], [ # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT([$BUILD_VERSION (directory name)]) ]) AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) # schema_files # List all manually edited RNG schemas (as opposed to auto-generated via make) # in the given directory. Use git if available to list managed RNGs, in case # there are leftover schema files from an earlier build of a different # version. Otherwise, check all RNGs. schema_files() { local files="$("$GIT" ls-files "$1"/*.rng 2>/dev/null)" AS_IF([test x"$files" = x""], [ files="$(ls -1 "$1"/*.rng | grep -E -v \ '/(pacemaker|api-result|crm_mon|versions)[^/]*\.rng')" ]) echo "$files" } # latest_schema_version # Determine highest RNG version in the given schema directory. latest_schema_version() { schema_files "$1" | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' dnl | sort -V | tail -1 } # schemas_for_make # Like schema_files, but suitable for use in make variables. 
schemas_for_make() { local file for file in $(schema_files "$1"); do AS_ECHO_N(["\$(top_srcdir)/$file "]) done } # Detect highest API schema version API_VERSION=$(latest_schema_version "xml/api") AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"], [Highest API schema version]) # Detect highest CIB schema version CIB_VERSION=$(latest_schema_version "xml") AC_SUBST(CIB_VERSION) # Re-run configure at next make if schema files change, to re-detect versions cib_schemas="$(schemas_for_make "xml")" api_schemas="$(schemas_for_make "xml/api")" CONFIG_STATUS_DEPENDENCIES="$cib_schemas $api_schemas" AC_SUBST(CONFIG_STATUS_DEPENDENCIES) dnl ============================================== dnl Process simple options dnl ============================================== AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"]) AS_IF([test x"$with_concurrent_fencing_default" = x"true"], [PCMK__CONCURRENT_FENCING_DEFAULT_TRUE="1"], [PCMK__CONCURRENT_FENCING_DEFAULT_TRUE="0"]) AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT_TRUE], [$PCMK__CONCURRENT_FENCING_DEFAULT_TRUE], [Whether concurrent-fencing cluster option default is true]) AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT], [$with_sbd_sync_default], [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable]) AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT], [$with_resource_stickiness_default], [Default value for resource-stickiness resource meta-attribute]) AS_IF([test x"${PCMK__GNUTLS_PRIORITIES}" != x""], [], [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])]) AC_DEFINE_UNQUOTED([PCMK__GNUTLS_PRIORITIES], ["$PCMK__GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) AC_SUBST(PCMK__GNUTLS_PRIORITIES) AC_SUBST(BUG_URL) AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"], [Where bugs should be reported]) AC_DEFINE_UNQUOTED([CRM_DAEMON_USER], ["$CRM_DAEMON_USER"], [User to run Pacemaker daemons as]) AC_SUBST(CRM_DAEMON_USER) AC_DEFINE_UNQUOTED([CRM_DAEMON_GROUP], ["$CRM_DAEMON_GROUP"], [Group to run Pacemaker daemons as]) AC_SUBST(CRM_DAEMON_GROUP) dnl ============================================== dnl Process file paths dnl ============================================== # expand_path_option [] # Given the name of a file path variable, expand any variable references # inside it, use the specified default if it is not specified, and ensure it # is a full path. expand_path_option() { # The first argument is the variable *name* (not value) ac_path_varname="$1" # Get the original value of the variable ac_path_value=$(eval echo "\${${ac_path_varname}}") # Expand any literal variable expressions in the value so that we don't # end up with something like '${prefix}' in #defines etc. # # Autoconf deliberately leaves values unexpanded to allow overriding # the configure script choices in make commands (for example, # "make exec_prefix=/foo install"). No longer being able to do this seems # like no great loss. 
eval ac_path_value=$(eval echo "${ac_path_value}") # Use (expanded) default if necessary AS_IF([test x"${ac_path_value}" = x""], [eval ac_path_value=$(eval echo "$2")]) # Require a full path AS_CASE(["$ac_path_value"], [/*], [eval ${ac_path_varname}="$ac_path_value"], [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])] ) } dnl Expand values of autoconf-provided directory options expand_path_option prefix expand_path_option exec_prefix expand_path_option bindir expand_path_option sbindir expand_path_option libexecdir expand_path_option datarootdir expand_path_option datadir expand_path_option sysconfdir expand_path_option sharedstatedir expand_path_option localstatedir expand_path_option libdir expand_path_option includedir expand_path_option oldincludedir expand_path_option infodir expand_path_option mandir AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Expand values of custom directory options AS_IF([test x"$INITDIR" = x"try"], [ AC_MSG_CHECKING([for an init directory]) INITDIR=no for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d ${sysconfdir}/init.d do AS_IF([test -d $initdir], [ INITDIR=$initdir break ]) done AC_MSG_RESULT([$INITDIR]) ]) support_lsb=$DISABLED AM_CONDITIONAL([BUILD_LSB], [test x"${INITDIR}" != x"no"]) AM_COND_IF([BUILD_LSB], [ support_lsb=$REQUIRED expand_path_option INITDIR PCMK_FEATURES="$PCMK_FEATURES lsb" ], [ INITDIR="" ]) AC_SUBST(INITDIR) AC_DEFINE_UNQUOTED([PCMK__ENABLE_LSB], [$support_lsb], [Whether to support LSB resource agents]) AC_DEFINE_UNQUOTED([PCMK__LSB_INIT_DIR], ["$INITDIR"], [Location for LSB init scripts]) expand_path_option localedir "${datadir}/locale" AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs]) AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"]) expand_path_option runstatedir "${localstatedir}/run" AC_DEFINE_UNQUOTED([PCMK__RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) expand_path_option docdir "${datadir}/doc/${PACKAGE}-${VERSION}" AC_SUBST(docdir) expand_path_option CONFIGDIR "${sysconfdir}/sysconfig" AC_SUBST(CONFIGDIR) expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf" AC_SUBST(PCMK__COROSYNC_CONF) expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker" AC_DEFINE_UNQUOTED([CRM_LOG_DIR], ["$CRM_LOG_DIR"], [Location for Pacemaker log file]) AC_SUBST(CRM_LOG_DIR) expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles" AC_DEFINE_UNQUOTED([CRM_BUNDLE_DIR], ["$CRM_BUNDLE_DIR"], [Location for Pacemaker bundle logs]) AC_SUBST(CRM_BUNDLE_DIR) expand_path_option PCMK__FENCE_BINDIR AC_SUBST(PCMK__FENCE_BINDIR) AC_DEFINE_UNQUOTED([PCMK__FENCE_BINDIR], ["$PCMK__FENCE_BINDIR"], [Location for executable fence agents]) expand_path_option PCMK_OCF_ROOT AC_SUBST(PCMK_OCF_ROOT) AC_DEFINE_UNQUOTED([PCMK_OCF_ROOT], ["$PCMK_OCF_ROOT"], [OCF root directory for resource agents and libraries]) expand_path_option PCMK__OCF_RA_PATH AC_SUBST(PCMK__OCF_RA_PATH) AC_DEFINE_UNQUOTED([PCMK__OCF_RA_PATH], ["$PCMK__OCF_RA_PATH"], [OCF directories to search for resource agents ]) expand_path_option OCF_RA_INSTALL_DIR AC_SUBST(OCF_RA_INSTALL_DIR) # Derived paths PCMK_SCHEMA_DIR="${datadir}/pacemaker" AC_DEFINE_UNQUOTED([PCMK_SCHEMA_DIR], ["$PCMK_SCHEMA_DIR"], [Location for the Pacemaker Relax-NG Schema]) AC_SUBST(PCMK_SCHEMA_DIR) PCMK__REMOTE_SCHEMA_DIR="${localstatedir}/lib/pacemaker/schemas" AC_DEFINE_UNQUOTED([PCMK__REMOTE_SCHEMA_DIR], 
["$PCMK__REMOTE_SCHEMA_DIR"], [Location to store Relax-NG Schema files on remote nodes]) AC_SUBST(PCMK__REMOTE_SCHEMA_DIR) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"], [Directory Pacemaker daemons should change to (without systemd, core files will go here)]) AC_SUBST(CRM_CORE_DIR) PCMK__PERSISTENT_DATA_DIR="${localstatedir}/lib/pacemaker" AC_DEFINE_UNQUOTED([PCMK__PERSISTENT_DATA_DIR], ["$PCMK__PERSISTENT_DATA_DIR"], [Location to store directory produced by Pacemaker daemons]) AC_SUBST(PCMK__PERSISTENT_DATA_DIR) CRM_BLACKBOX_DIR="${localstatedir}/lib/pacemaker/blackbox" AC_DEFINE_UNQUOTED([CRM_BLACKBOX_DIR], ["$CRM_BLACKBOX_DIR"], [Where to keep blackbox dumps]) AC_SUBST(CRM_BLACKBOX_DIR) PCMK_SCHEDULER_INPUT_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED([PCMK_SCHEDULER_INPUT_DIR], ["$PCMK_SCHEDULER_INPUT_DIR"], [Where to keep scheduler outputs]) AC_SUBST(PCMK_SCHEDULER_INPUT_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED([CRM_CONFIG_DIR], ["$CRM_CONFIG_DIR"], [Where to keep configuration files]) AC_SUBST(CRM_CONFIG_DIR) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED([CRM_DAEMON_DIR], ["$CRM_DAEMON_DIR"], [Location for Pacemaker daemons]) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) PCMK__OCF_TMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED([PCMK__OCF_TMP_DIR], ["$PCMK__OCF_TMP_DIR"], [Where resource agents should keep state files]) AC_SUBST(PCMK__OCF_TMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED([PACEMAKER_CONFIG_DIR], ["$PACEMAKER_CONFIG_DIR"], [Where to keep configuration files like authkey]) AC_SUBST(PACEMAKER_CONFIG_DIR) AC_DEFINE_UNQUOTED([SBIN_DIR], ["$sbindir"], [Location for system binaries]) # Warn about any directories that don't exist (which may be OK) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR localedir do dirname=`eval echo '${'${j}'}'` AS_IF([test -n "$dirname" && test ! 
-d "$dirname"], [AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])]) done dnl =============================================== dnl General Processing dnl =============================================== us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([HAVE_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([HAVE_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include ]]) ], [[#define _GNU_SOURCE #include ]]) ], [], [[#include ]]) ]) AS_IF([test -z "${us_auth}"], [ # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([HAVE_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([HAVE_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ AC_MSG_FAILURE([No way to authenticate a Unix socket peer]) ], [[#include ]]) ]) ]) dnl OS-based decision-making is poor autotools practice; feature-based dnl mechanisms are strongly preferred. Keep this section to a bare minimum; dnl regard as a "necessary evil". dnl Set host_os and host_cpu AC_CANONICAL_HOST INIT_EXT="" PROCFS=0 dnl Solaris and some *BSD versions support procfs but not files we need AS_CASE(["$host_os"], [*bsd*], [INIT_EXT=".sh"], [*linux*], [PROCFS=1], [darwin*], [ LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ]) AC_SUBST(INIT_EXT) AM_CONDITIONAL([SUPPORT_PROCFS], [test $PROCFS -eq 1]) AC_DEFINE_UNQUOTED([HAVE_LINUX_PROCFS], [$PROCFS], [Define to 1 if procfs is supported]) AS_CASE(["$host_cpu"], [ppc64|powerpc64], [ AS_CASE([$CFLAGS], [*powerpc64*], [], [*], [AS_IF([test x"$GCC" = x"yes"], [CFLAGS="$CFLAGS -m64"]) ]) ]) dnl ============================================== dnl Documentation build dependencies and checks dnl ============================================== AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") AS_IF([test x"${HELP2MAN}" != x""], [PCMK_FEATURES="$PCMK_FEATURES generated-manpages"]) MANPAGE_XSLT="" AS_IF([test x"${XSLTPROC}" != x""], [ AC_MSG_CHECKING([for DocBook-to-manpage transform]) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') AS_IF([test x"${MANPAGE_XSLT}" = x""], [ DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d 2>/dev/null | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS} do AS_IF([test -f "${d}/${XSLT}"], [ MANPAGE_XSLT="${d}/${XSLT}" break ]) done ]) ]) AC_MSG_RESULT([$MANPAGE_XSLT]) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") AS_IF([test x"${MANPAGE_XSLT}" != x""], [PCMK_FEATURES="$PCMK_FEATURES agent-manpages"]) AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | 
grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) AS_IF([test x"${ASCIIDOC_CONV}" != x""], [PCMK_FEATURES="$PCMK_FEATURES ascii-docs"]) AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""]) AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" AS_IF([test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH"], [ $GETOPT_PATH -T >/dev/null 2>/dev/null AS_IF([test $? -eq 4], [break]) ]) GETOPT_PATH="" done IFS=$IFS_orig AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])], [ AC_MSG_RESULT([no]) AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt]) ]) AC_SUBST([GETOPT_PATH]) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) PKG_CHECK_MODULES([UUID], [uuid], [CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}" LIBS="${LIBS} ${UUID_LIBS}"]) AC_CHECK_FUNCS([sched_setscheduler]) AS_IF([test x"$ac_cv_func_sched_setscheduler" != x"yes"], [PC_LIBS_RT=""], [PC_LIBS_RT="-lrt"]) AC_SUBST(PC_LIBS_RT) # Require minimum glib version PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # Check whether high-resolution sleep function is available AC_CHECK_FUNCS([nanosleep usleep]) # # Where is dlopen? # AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"], [LIBADD_DL=""], [test x"$ac_cv_lib_dl_dlopen" = x"yes"], [LIBADD_DL=-ldl], [LIBADD_DL=${lt_cv_dlopen_libs}]) PKG_CHECK_MODULES(LIBXML2, [libxml-2.0 >= 2.9.2], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) AC_PATH_PROGS(XMLLINT_PATH, xmllint, /usr/bin/xmllint) AC_DEFINE_UNQUOTED(XMLLINT_PATH, "$XMLLINT_PATH", xmllint command) REQUIRE_LIB([xslt], [xsltApplyStylesheet]) AC_MSG_CHECKING([whether __progname and __progname_full are available]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]], [[__progname = "foo"; __progname_full = "foo bar";]])], [ have_progname="yes" AC_DEFINE(HAVE_PROGNAME, 1, [Define to 1 if processes can change their name]) ], [have_progname="no"]) AC_MSG_RESULT([$have_progname]) dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) 
cc_temp_flags "$CFLAGS $WERROR" # Optional headers (inclusion of these should be conditional in C code) AC_CHECK_HEADERS([linux/swab.h]) AC_CHECK_HEADERS([stddef.h]) AC_CHECK_HEADERS([sys/signalfd.h]) AC_CHECK_HEADERS([uuid/uuid.h]) AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h]) AS_IF([test x"$ac_cv_lib_pam_pam_start" = x"yes"], AS_IF([test x"$ac_cv_header_security_pam_appl_h" = x"yes" dnl || test x"$ac_cv_header_pam_pam_appl_h" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES pam"])) # Required headers REQUIRE_HEADER([arpa/inet.h]) REQUIRE_HEADER([ctype.h]) REQUIRE_HEADER([dirent.h]) REQUIRE_HEADER([dlfcn.h]) REQUIRE_HEADER([errno.h]) REQUIRE_HEADER([fcntl.h]) REQUIRE_HEADER([float.h]) REQUIRE_HEADER([glib.h]) REQUIRE_HEADER([grp.h]) REQUIRE_HEADER([inttypes.h]) REQUIRE_HEADER([libgen.h]) REQUIRE_HEADER([limits.h]) REQUIRE_HEADER([locale.h]) REQUIRE_HEADER([netdb.h]) REQUIRE_HEADER([netinet/in.h]) REQUIRE_HEADER([netinet/ip.h], [ #include #include ]) REQUIRE_HEADER([netinet/tcp.h]) REQUIRE_HEADER([pwd.h]) REQUIRE_HEADER([regex.h]) REQUIRE_HEADER([sched.h]) REQUIRE_HEADER([signal.h]) REQUIRE_HEADER([stdarg.h]) REQUIRE_HEADER([stdbool.h]) REQUIRE_HEADER([stdint.h]) REQUIRE_HEADER([stdio.h]) REQUIRE_HEADER([stdlib.h]) REQUIRE_HEADER([string.h]) REQUIRE_HEADER([strings.h]) REQUIRE_HEADER([sys/ioctl.h]) REQUIRE_HEADER([sys/param.h]) REQUIRE_HEADER([sys/reboot.h]) REQUIRE_HEADER([sys/resource.h]) REQUIRE_HEADER([sys/socket.h]) REQUIRE_HEADER([sys/stat.h]) REQUIRE_HEADER([sys/time.h]) REQUIRE_HEADER([sys/types.h]) REQUIRE_HEADER([sys/uio.h]) REQUIRE_HEADER([sys/utsname.h]) REQUIRE_HEADER([sys/wait.h]) REQUIRE_HEADER([termios.h]) REQUIRE_HEADER([time.h]) REQUIRE_HEADER([unistd.h]) REQUIRE_HEADER([libxml/xpath.h]) REQUIRE_HEADER([libxslt/xslt.h]) cc_restore_flags dnl ======================================================================== dnl Generic declarations dnl ======================================================================== AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [], [[ #include ]]) dnl ======================================================================== dnl Unit test declarations dnl ======================================================================== AC_CHECK_DECLS([assert_float_equal], [], [], [[ #include #include #include #include ]]) dnl ======================================================================== dnl Byte size dnl ======================================================================== # Compile-time assert hack # https://jonjagger.blogspot.com/2017/07/compile-time-assertions-in-c.html AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[ switch (0) { case 0: case (CHAR_BIT == 8): break; } ]])], [], [AC_MSG_FAILURE(m4_normalize([Pacemaker is not supported on platforms where char is not 8 bits]))]) dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include ]) dnl ======================================================================== dnl Functions dnl ======================================================================== REQUIRE_FUNC([alphasort]) REQUIRE_FUNC([getopt]) REQUIRE_FUNC([scandir]) REQUIRE_FUNC([setenv]) REQUIRE_FUNC([strndup]) REQUIRE_FUNC([strnlen]) REQUIRE_FUNC([unsetenv]) REQUIRE_FUNC([uuid_unparse]) 
REQUIRE_FUNC([vasprintf]) AC_CHECK_FUNCS([strchrnul]) AC_CHECK_FUNCS([fopen64]) AM_CONDITIONAL([WRAPPABLE_FOPEN64], [test x"$ac_cv_func_fopen64" = x"yes"]) AC_MSG_CHECKING([whether strerror always returns non-NULL]) AC_RUN_IFELSE([AC_LANG_PROGRAM([[ #include #include ]], [[ return strerror(-1) == NULL; ]])], [AC_MSG_RESULT([yes])], [AC_MSG_ERROR([strerror() is not C99-compliant])], [AC_MSG_ERROR([strerror() is not C99-compliant])]) AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include ]], [[ const char *s = "some-command-line-arg"; char *name = NULL; int n = sscanf(s, "%ms", &name); return n != 1; ]])], [have_sscanf_m="yes"], [have_sscanf_m="no"], [have_sscanf_m="no"]) AS_IF([test x"$have_sscanf_m" = x"yes"], [AC_DEFINE([HAVE_SSCANF_M], [1], [Define to 1 if sscanf %m modifier is available])]) dnl ======================================================================== dnl bzip2 dnl ======================================================================== REQUIRE_HEADER([bzlib.h]) REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress]) dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[sighandler_t *f;]])], [ AC_MSG_RESULT([yes]) AC_DEFINE([HAVE_SIGHANDLER_T], [1], [Define to 1 if sighandler_t is available]) ], [AC_MSG_RESULT([no])]) dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurses takes precedence. dnl AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h]) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" AS_IF([test x"$ac_cv_header_ncurses_h" = x"yes"], [ AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" ]) AS_IF([test x"$ac_cv_header_ncurses_ncurses_h" = x"yes"], [ AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" ]) dnl Only look for non-n-library if there was no n-library. AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_h" = x"yes"], [ AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" ]) dnl Only look for non-n-library if there was no n-library. 
AS_IF([test x"$CURSESLIBS" = x"" && test x"$ac_cv_header_curses_curses_h" = x"yes"], [ AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" ]) AS_IF([test x"$CURSESLIBS" != x""], [PCMK_FEATURES="$PCMK_FEATURES ncurses"]) dnl Check for printw() prototype compatibility AS_IF([test x"$CURSESLIBS" != x"" && cc_supports_flag -Wcast-qual], [ ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 AS_IF([cc_supports_flag -fPIC], [cc_temp_flags "-Wcast-qual $WERROR -fPIC"], [cc_temp_flags "-Wcast-qual $WERROR"]) AC_MSG_CHECKING([whether curses library is compatible]) AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [AC_MSG_RESULT([yes])], [ AC_MSG_RESULT([no]) AC_MSG_WARN(m4_normalize([Disabling curses because the printw() function of your (n)curses library is old. If you wish to enable curses, update to a newer version (ncurses 5.4 or later is recommended, available from https://invisible-island.net/ncurses/) ])) AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1], [Define to 1 if curses library has incompatible printw()]) ] ) LIBS=$ac_save_LIBS cc_restore_flags ]) AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== CFLAGS_ORIG="$CFLAGS" AS_IF([test $with_coverage -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ] ) AS_IF([test $with_profiling -ne $DISABLED], [ with_profiling=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES profile" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin" dnl CFLAGS="$CFLAGS -fno-inline-functions" dnl CFLAGS="$CFLAGS -fno-default-inline" dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once" dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed \ -e 's/-O.\ //g' \ -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \ -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG]) AC_MSG_NOTICE([CFLAGS after: $CFLAGS]) ] ) AM_CONDITIONAL([BUILD_PROFILING], [test "$with_profiling" = "$REQUIRED"]) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== PKG_CHECK_MODULES([libqb], [libqb >= 1.0.1]) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 2.0.5+ (2022-03) AC_CHECK_FUNCS([qb_ipcc_connect_async]) dnl libqb 2.0.2+ (2020-10) AC_CHECK_FUNCS([qb_ipcc_auth_get]) dnl libqb 2.0.0+ (2020-05) dnl also defines QB_FEATURE_LOG_HIRES_TIMESTAMPS CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available AS_IF([test x"$cross_compiling" != x"yes"], [CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"]) 
AC_CHECK_HEADERS([stonith/stonith.h], [ AC_CHECK_LIB([pils], [PILLoadPlugin]) AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel]) PCMK_FEATURES="$PCMK_FEATURES lha" ]) AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"]) -dnl =============================================== -dnl Detect DBus, systemd, and Upstart support -dnl =============================================== +dnl =============================== +dnl Detect DBus and systemd support +dnl =============================== HAVE_dbus=0 PC_NAME_DBUS="" PKG_CHECK_MODULES([DBUS],[dbus-1 >= 1.5.12], [ HAVE_dbus=1 PC_NAME_DBUS="dbus-1" CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}" ],[]) AC_DEFINE_UNQUOTED(HAVE_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) AC_SUBST(PC_NAME_DBUS) check_systemdsystemunitdir() { AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) test x"$systemdsystemunitdir" != x"" return $? } AS_CASE([$enable_systemd], [$REQUIRED], [ AS_IF([test $HAVE_dbus = 0], [AC_MSG_FAILURE([Cannot support systemd resources without DBus])]) AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"], [AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])]) AS_IF([check_systemdsystemunitdir], [], [AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])]) ], [$OPTIONAL], [ AS_IF([test $HAVE_dbus = 0 \ || test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [enable_systemd=$DISABLED], [ AC_MSG_CHECKING([for systemd version (using dbus-send)]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "version unavailable"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) AS_IF([test x"$ret" != x"unavailable" \ || systemctl --version 2>/dev/null | grep -q systemd], [ AS_IF([check_systemdsystemunitdir], [enable_systemd=$REQUIRED], [enable_systemd=$DISABLED]) ], [enable_systemd=$DISABLED] ) ]) ], ) AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])], [ AC_MSG_RESULT([yes]) PCMK_FEATURES="$PCMK_FEATURES systemd" ] ) AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd], [Support systemd resources]) AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED]) AC_SUBST(SUPPORT_SYSTEMD) -AS_CASE([$enable_upstart], - [$REQUIRED], [ - AS_IF([test $HAVE_dbus = 0], - [AC_MSG_FAILURE([Cannot support Upstart resources without DBus])]) - ], - [$OPTIONAL], [ - AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED], - [ - AC_MSG_CHECKING([for Upstart version (using dbus-send)]) - ret=$({ dbus-send --system --print-reply \ - --dest=com.ubuntu.Upstart \ - /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ - string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ - || echo "version unavailable"; } | tail -n1) - # sanitize output a bit (interested just in value, not type), - # ret is intentionally unenquoted so as to normalize whitespace - ret=$(echo ${ret} | cut -d' ' -f2-) - AC_MSG_RESULT([${ret}]) - AS_IF([test x"$ret" != x"unavailable" \ - || initctl --version 2>/dev/null | grep -q upstart], 
- [enable_upstart=$REQUIRED], - [enable_upstart=$DISABLED] - ) - ]) - ], -) -AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) -AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])], - [ - AC_MSG_RESULT([yes]) - PCMK_FEATURES="$PCMK_FEATURES upstart" - ] -) -AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart], - [Support Upstart resources]) -AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED]) -AC_SUBST(SUPPORT_UPSTART) - - dnl ======================================================================== dnl Detect Nagios support dnl ======================================================================== AS_CASE([$with_nagios], [$REQUIRED], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])]) ], [$OPTIONAL], [ AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"], [with_nagios=$DISABLED], [with_nagios=$REQUIRED]) ] ) AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"]) AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins]) AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED]) AS_IF([test x"$NAGIOS_PLUGIN_DIR" = x""], [NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"]) AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) AS_IF([test x"$NAGIOS_METADATA_DIR" = x""], [NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"]) AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Detect support for "service" alias dnl ======================================================================== PCMK__ENABLE_SERVICE=$DISABLED AM_COND_IF([BUILD_LSB], [PCMK__ENABLE_SERVICE=$REQUIRED]) AM_COND_IF([BUILD_SYSTEMD], [PCMK__ENABLE_SERVICE=$REQUIRED]) -AM_COND_IF([BUILD_UPSTART], [PCMK__ENABLE_SERVICE=$REQUIRED]) AS_IF([test $PCMK__ENABLE_SERVICE -ne $DISABLED], [PCMK_FEATURES="$PCMK_FEATURES service"]) AC_SUBST(PCMK__ENABLE_SERVICE) AC_DEFINE_UNQUOTED([PCMK__ENABLE_SERVICE], [$PCMK__ENABLE_SERVICE], [Whether "service" is supported as an agent standard]) dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== COROSYNC_LIBS="" AS_CASE([$with_corosync], [$REQUIRED], [ # These will be fatal if unavailable PKG_CHECK_MODULES([cpg], [libcpg]) PKG_CHECK_MODULES([cfg], [libcfg]) PKG_CHECK_MODULES([cmap], [libcmap]) PKG_CHECK_MODULES([quorum], [libquorum]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common]) ] [$OPTIONAL], [ PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED]) PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED]) AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED]) ] ) AS_IF([test $with_corosync -ne $DISABLED], [ AC_MSG_CHECKING([for Corosync 2 or later]) AC_MSG_RESULT([yes]) CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" CPPFLAGS="$CPPFLAGS `$PKG_CONFIG 
--cflags-only-I corosync`" COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-ge-2" dnl Shutdown tracking added (back) to corosync Jan 2021 saved_LIBS="$LIBS" LIBS="$LIBS $COROSYNC_LIBS" AC_CHECK_FUNCS([corosync_cfg_trackstart]) LIBS="$saved_LIBS" ] ) AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync], [Support the Corosync messaging and membership layer]) AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED]) AC_SUBST([SUPPORT_COROSYNC]) dnl dnl Cluster stack - Sanity dnl AS_IF([test x"$STACKS" != x""], [AC_MSG_NOTICE([Supported stacks:${STACKS}])], [AC_MSG_FAILURE([At least one cluster stack must be supported])]) PCMK_FEATURES="${PCMK_FEATURES}${STACKS}" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== AS_IF([test $with_cibsecrets -ne $DISABLED], [ with_cibsecrets=$REQUIRED PCMK_FEATURES="$PCMK_FEATURES cibsecrets" PCMK__CIB_SECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED([PCMK__CIB_SECRETS_DIR], ["$PCMK__CIB_SECRETS_DIR"], [Location for CIB secrets]) AC_SUBST([PCMK__CIB_SECRETS_DIR]) ] ) AC_DEFINE_UNQUOTED([PCMK__ENABLE_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets]) AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED]) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== PKG_CHECK_MODULES(GNUTLS, [gnutls >= 3.1.7], [CPPFLAGS="${CPPFLAGS} ${GNUTLS_CFLAGS}" LIBS="${LIBS} ${GNUTLS_LIBS}"]) # --- ASAN/UBSAN/TSAN (see man gcc) --- # when using SANitizers, we need to pass the -fsanitize.. # to both CFLAGS and LDFLAGS. The CFLAGS/LDFLAGS must be # specified as first in the list or there will be runtime # issues (for example user has to LD_PRELOAD asan for it to work # properly). AS_IF([test -n "${SANITIZERS}"], [ SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g') for SANITIZER in $SANITIZERS do AS_CASE([$SANITIZER], [asan|ASAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" PCMK_FEATURES="$PCMK_FEATURES asan" REQUIRE_LIB([asan],[main]) ], [ubsan|UBSAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" PCMK_FEATURES="$PCMK_FEATURES ubsan" REQUIRE_LIB([ubsan],[main]) ], [tsan|TSAN], [ SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" PCMK_FEATURES="$PCMK_FEATURES tsan" REQUIRE_LIB([tsan],[main]) ]) done ]) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. 
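As an editorial aside before the CFLAGS export handling below: the sanitizer block just above splits a comma-separated list (e.g. "asan,ubsan") into individual -fsanitize options and places them ahead of the pre-existing compiler and linker flags, which the comment there notes is required for the sanitizers to behave at runtime. A hedged sketch of what that looks like in practice (not part of this patch; exactly how the SANITIZERS value reaches configure is defined earlier in this file and only assumed here):

# Sketch only: request ASan and UBSan; the loop above turns the list into
# -fsanitize=address and -fsanitize=undefined plus the matching -lasan/-lubsan,
# and adds "asan"/"ubsan" to PCMK_FEATURES.
SANITIZERS="asan,ubsan" ./configure
# Schematically, the sanitizer flags then come first:
#   CFLAGS  = -fsanitize=address -fsanitize=undefined  <original CFLAGS> <extra warnings>
#   LDFLAGS = -fsanitize=address -lasan -fsanitize=undefined -lubsan  <original LDFLAGS>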
AS_IF([export | fgrep " CFLAGS=" > /dev/null], [ SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS ]) CC_EXTRAS="" AS_IF([test x"$GCC" != x"yes"], [CFLAGS="$CFLAGS -g"], [ CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags AS_IF([cc_supports_flag "-Wformat-nonliteral"], [gcc_format_nonliteral=yes], [gcc_format_nonliteral=no]) # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). EXTRA_FLAGS="-fgnu89-inline" EXTRA_FLAGS="$EXTRA_FLAGS -Wall" EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return" EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast" EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align" EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement" EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels" EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal" EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security" EXTRA_FLAGS="$EXTRA_FLAGS -Wimplicit-fallthrough" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations" EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long" EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing" EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith" EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes" EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings" EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable" EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char" AS_IF([test x"$gcc_diagnostic_push_pull" = x"yes"], [ AC_DEFINE([HAVE_FORMAT_NONLITERAL], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" ], [test x"$gcc_format_nonliteral" = x"yes"], [EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"]) # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"]) done AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}]) ]) dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl 
--enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl pacemaker-based, pacemaker-fenced, pacemaker-remoted, dnl pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl AS_IF([test $enable_hardening -eq $OPTIONAL], [ AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0], [enable_hardening=$REQUIRED], [AC_MSG_NOTICE([Hardening: using custom flags from environment])] ) ], [ unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB ] ) AS_CASE([$enable_hardening], [$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])], [$REQUIRED], [ CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 stackprot="none" # daemons incl. libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" relro=1 ]) # daemons: PIE for both CFLAGS and LDFLAGS AS_IF([cc_supports_flag -fPIE], [ flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [ CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE" LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" pie=1 ]) ] ) # daemons incl. 
libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries AS_IF([test "${relro}" = 1 && test "${pie}" = 1], [ flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" bindnow=1 ]) ] ) AS_IF([test "${bindnow}" = 1], [ flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}" LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" ]) ]) # universal: prefer strong > all > default stack protector if possible flag= AS_IF([cc_supports_flag -fstack-protector-strong], [ flag="-fstack-protector-strong" stackprot="strong" ], [cc_supports_flag -fstack-protector-all], [ flag="-fstack-protector-all" stackprot="all" ], [cc_supports_flag -fstack-protector], [ flag="-fstack-protector" stackprot="default" ] ) AS_IF([test -n "${flag}"], [CC_EXTRAS="${CC_EXTRAS} ${flag}"]) # universal: enable stack clash protection if possible AS_IF([cc_supports_flag -fstack-clash-protection], [ CC_EXTRAS="${CC_EXTRAS} -fstack-clash-protection" AS_IF([test "${stackprot}" = "none"], [stackprot="clash-only"], [stackprot="${stackprot}+clash"] ) ] ) # Log a summary AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test x"${stackprot}" != x"none"], [AC_MSG_NOTICE(m4_normalize([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${stackprot}])) ], [AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])] ) ], ) CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
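Before the warning and -Werror handling below, a brief illustration of the hardening controls documented earlier in this section (a hedged example, not part of this patch): under the default --enable-hardening=try mode, setting any one of the four targeted variables to a non-empty value makes configure use all four exactly as given instead of autodetecting, whereas an explicit --enable-hardening suppresses them.

# Sketch only: hand-picked hardening flags for the daemons and their libraries,
# relying on the default --enable-hardening=try mode. The variable names and the
# "used as-is when any is set" rule come from the comment block above; the flag
# values are merely examples of what the autodetection would otherwise probe for
# (PIE, partial/full RELRO).
CFLAGS_HARDENED_EXE="-fPIE" \
LDFLAGS_HARDENED_EXE="-pie -Wl,-z,relro -Wl,-z,now" \
LDFLAGS_HARDENED_LIB="-Wl,-z,relro" \
./configure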
dnl dnl AS_IF([test $enable_fatal_warnings -ne $DISABLED], [ AC_MSG_NOTICE([Enabling fatal compiler warnings]) CFLAGS="$CFLAGS $WERROR" ]) AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output AS_IF([test $enable_quiet -ne $DISABLED], [ AC_MSG_NOTICE([Suppressing make details]) QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant ], [ QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" ] ) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" # Make features list available (sorted alphabetically, without leading space) PCMK_FEATURES=`echo "$PCMK_FEATURES" | sed -e 's/^ //' -e 's/ /\n/g' | sort | xargs` AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) dnl Files we output that need to be executable CONFIG_FILES_EXEC([agents/ocf/ClusterMon], [agents/ocf/Dummy], [agents/ocf/HealthCPU], [agents/ocf/HealthIOWait], [agents/ocf/HealthSMART], [agents/ocf/Stateful], [agents/ocf/SysInfo], [agents/ocf/attribute], [agents/ocf/controld], [agents/ocf/ifspeed], [agents/ocf/ping], [agents/ocf/remote], [agents/stonith/fence_legacy], [agents/stonith/fence_watchdog], [cts/cluster_test], [cts/cts], [cts/cts-attrd], [cts/cts-cli], [cts/cts-exec], [cts/cts-fencing], [cts/cts-lab], [cts/cts-regression], [cts/cts-scheduler], [cts/cts-schemas], [cts/benchmark/clubench], [cts/support/LSBDummy], [cts/support/cts-support], [cts/support/fence_dummy], [cts/support/pacemaker-cts-dummyd], [doc/abi-check], [maint/bumplibs], [tools/cluster-clean], [tools/cluster-helper], [tools/crm_failcount], [tools/crm_master], [tools/crm_report], [tools/crm_standby], [tools/cibsecret], [tools/pcmk_simtimes], [xml/rng-helper]) dnl Other files we output AC_CONFIG_FILES(Makefile \ agents/Makefile \ agents/alerts/Makefile \ agents/ocf/Makefile \ agents/stonith/Makefile \ cts/Makefile \ cts/benchmark/Makefile \ cts/scheduler/Makefile \ cts/scheduler/dot/Makefile \ cts/scheduler/exp/Makefile \ cts/scheduler/scores/Makefile \ cts/scheduler/stderr/Makefile \ cts/scheduler/summary/Makefile \ cts/scheduler/xml/Makefile \ cts/support/Makefile \ cts/support/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ - daemons/pacemakerd/pacemaker.combined.upstart \ daemons/pacemakerd/pacemaker.service \ - daemons/pacemakerd/pacemaker.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/sphinx/Makefile \ etc/Makefile \ etc/init.d/pacemaker \ etc/logrotate.d/pacemaker \ etc/sysconfig/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ lib/Makefile \ lib/cib/Makefile \ lib/cluster/Makefile \ lib/cluster/tests/Makefile \ lib/cluster/tests/cluster/Makefile \ lib/cluster/tests/cpg/Makefile \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/acl/Makefile \ 
lib/common/tests/actions/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/digest/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/health/Makefile \ lib/common/tests/io/Makefile \ lib/common/tests/iso8601/Makefile \ lib/common/tests/lists/Makefile \ lib/common/tests/messages/Makefile \ lib/common/tests/nodes/Makefile \ lib/common/tests/nvpair/Makefile \ lib/common/tests/options/Makefile \ lib/common/tests/output/Makefile \ lib/common/tests/patchset/Makefile \ lib/common/tests/probes/Makefile \ lib/common/tests/procfs/Makefile \ lib/common/tests/resources/Makefile \ lib/common/tests/results/Makefile \ lib/common/tests/rules/Makefile \ lib/common/tests/scheduler/Makefile \ lib/common/tests/schemas/Makefile \ lib/common/tests/scores/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/common/tests/xml/Makefile \ lib/common/tests/xpath/Makefile \ lib/fencing/Makefile \ lib/libpacemaker.pc \ lib/lrmd/Makefile \ lib/pacemaker/Makefile \ lib/pacemaker/tests/Makefile \ lib/pacemaker/tests/pcmk_resource/Makefile \ lib/pacemaker/tests/pcmk_ticket/Makefile \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-cluster.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/native/Makefile \ lib/pengine/tests/status/Makefile \ lib/pengine/tests/unpack/Makefile \ lib/pengine/tests/utils/Makefile \ lib/services/Makefile \ maint/Makefile \ po/Makefile.in \ python/Makefile \ python/setup.py \ python/pacemaker/Makefile \ python/pacemaker/_cts/Makefile \ python/pacemaker/_cts/tests/Makefile \ python/pacemaker/buildoptions.py \ python/tests/Makefile \ rpm/Makefile \ tests/Makefile \ tools/Makefile \ tools/crm_mon.service \ - tools/crm_mon.upstart \ tools/report.collector \ tools/report.common \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_NOTICE([]) AC_MSG_NOTICE([$PACKAGE configuration:]) AC_MSG_NOTICE([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_NOTICE([ Features = ${PCMK_FEATURES}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ Prefix = ${prefix}]) AC_MSG_NOTICE([ Executables = ${sbindir}]) AC_MSG_NOTICE([ Man pages = ${mandir}]) AC_MSG_NOTICE([ Libraries = ${libdir}]) AC_MSG_NOTICE([ Header files = ${includedir}]) AC_MSG_NOTICE([ Arch-independent files = ${datadir}]) AC_MSG_NOTICE([ State information = ${localstatedir}]) AC_MSG_NOTICE([ System configuration = ${sysconfdir}]) AC_MSG_NOTICE([ OCF agents = ${PCMK_OCF_ROOT}]) AM_COND_IF([BUILD_LSB], [AC_MSG_NOTICE([ LSB agents = ${INITDIR}])]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_NOTICE([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([ CFLAGS = ${CFLAGS}]) AC_MSG_NOTICE([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_NOTICE([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_NOTICE([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_NOTICE([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_NOTICE([ Libraries = ${LIBS}]) AC_MSG_NOTICE([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_NOTICE([ Unix socket auth method = ${us_auth}]) diff --git a/cts/cts-exec.in b/cts/cts-exec.in index 6a15f9828f..0a68da3410 100644 --- a/cts/cts-exec.in +++ b/cts/cts-exec.in @@ 
-1,989 +1,925 @@ #!@PYTHON@ """Regression tests for Pacemaker's pacemaker-execd.""" # pylint doesn't like the module name "cts-exec" which is an invalid complaint for this file # but probably something we want to continue warning about elsewhere # pylint: disable=invalid-name # pacemaker imports need to come after we modify sys.path, which pylint will complain about. # pylint: disable=wrong-import-position __copyright__ = "Copyright 2012-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import argparse import os import stat import sys import subprocess import shutil import tempfile # Where to find test binaries # Prefer the source tree if available TEST_DIR = sys.path[0] # These imports allow running from a source checkout after running `make`. # Note that this doesn't necessarily mean the tests will run successfully, # but being able to see --help output can be useful. if os.path.exists("@abs_top_srcdir@/python"): sys.path.insert(0, "@abs_top_srcdir@/python") # pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@": sys.path.insert(0, "@abs_top_builddir@/python") from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus from pacemaker._cts.corosync import Corosync from pacemaker._cts.process import killall, exit_if_proc_running, stdout_from_command from pacemaker._cts.test import Test, Tests # File permissions for executable scripts we create EXECMODE = stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH def update_path(): # pylint: disable=protected-access """Set the PATH environment variable appropriately for the tests.""" new_path = os.environ['PATH'] if os.path.exists("%s/cts-exec.in" % TEST_DIR): print("Running tests from the source tree: %s (%s)" % (BuildOptions._BUILD_DIR, TEST_DIR)) # For pacemaker-execd, cts-exec-helper, and pacemaker-remoted new_path = "%s/daemons/execd:%s" % (BuildOptions._BUILD_DIR, new_path) new_path = "%s/tools:%s" % (BuildOptions._BUILD_DIR, new_path) # For crm_resource # For pacemaker-fenced new_path = "%s/daemons/fenced:%s" % (BuildOptions._BUILD_DIR, new_path) # For cts-support new_path = "%s/cts/support:%s" % (BuildOptions._BUILD_DIR, new_path) else: print("Running tests from the install tree: %s (not %s)" % (BuildOptions.DAEMON_DIR, TEST_DIR)) # For cts-exec-helper, cts-support, pacemaker-execd, pacemaker-fenced, # and pacemaker-remoted new_path = "%s:%s" % (BuildOptions.DAEMON_DIR, new_path) print('Using PATH="%s"' % new_path) os.environ['PATH'] = new_path class ExecTest(Test): """Executor for a single pacemaker-execd regression test.""" def __init__(self, name, description, **kwargs): """Create a new ExecTest instance. Arguments: name -- A unique name for this test. This can be used on the command line to specify that only a specific test should be executed. description -- A meaningful description for the test. Keyword arguments: tls -- Enable pacemaker-remoted. """ Test.__init__(self, name, description, **kwargs) self.tls = kwargs.get("tls", False) # If we are going to run the stonith resource tests, we will need to # launch and track Corosync and pacemaker-fenced.
self._corosync = None self._fencer = None self._is_stonith_test = "stonith" in self.name if self.tls: self._daemon_location = "pacemaker-remoted" else: self._daemon_location = "pacemaker-execd" if self._is_stonith_test: self._corosync = Corosync(self.verbose, self.logdir, "cts-exec") self._test_tool_location = "cts-exec-helper" def _kill_daemons(self): killall([ "corosync", "pacemaker-fenced", "lt-pacemaker-fenced", "pacemaker-execd", "lt-pacemaker-execd", "cts-exec-helper", "lt-cts-exec-helper", "pacemaker-remoted", ]) def _start_daemons(self): if self._corosync: self._corosync.start(kill_first=True) # pylint: disable=consider-using-with self._fencer = subprocess.Popen(["pacemaker-fenced", "-s"]) cmd = [self._daemon_location, "-l", self.logpath] if self.verbose: cmd += ["-V"] # pylint: disable=consider-using-with self._daemon_process = subprocess.Popen(cmd) def clean_environment(self): """Clean up the host after running a test.""" if self._daemon_process: self._daemon_process.terminate() self._daemon_process.wait() if self.verbose: print("Daemon Output Start") with open(self.logpath, "rt", errors="replace", encoding="utf-8") as logfile: for line in logfile: print(line.strip()) print("Daemon Output End") if self._corosync: self._fencer.terminate() self._fencer.wait() self._corosync.stop() self._daemon_process = None self._fencer = None self._corosync = None def add_cmd(self, cmd=None, **kwargs): """Add a cts-exec-helper command to be executed as part of this test.""" if cmd is None: cmd = self._test_tool_location if cmd == self._test_tool_location: if self.verbose: kwargs["args"] += " -V " if self.tls: kwargs["args"] += " -S " kwargs["validate"] = False kwargs["check_rng"] = False kwargs["check_stderr"] = False Test.add_cmd(self, cmd, **kwargs) def run(self): """Execute this test.""" if self.tls and self._is_stonith_test: self._result_txt = "SKIPPED - '%s' - disabled when testing pacemaker_remote" % (self.name) print(self._result_txt) return Test.run(self) class ExecTests(Tests): """Collection of all pacemaker-execd regression tests.""" def __init__(self, **kwargs): """ Create a new ExecTests instance. Keyword arguments: tls -- Enable pacemaker-remoted. 
""" Tests.__init__(self, **kwargs) self.tls = kwargs.get("tls", False) self._action_timeout = " -t 9000 " self._installed_files = [] self._rsc_classes = self._setup_rsc_classes() print("Testing resource classes %r" % self._rsc_classes) if "lsb" in self._rsc_classes: service_agent = "LSBDummy" elif "systemd" in self._rsc_classes: service_agent = "pacemaker-cts-dummyd@3" else: service_agent = "unsupported" self._common_cmds = { "ocf_reg_line": '-c register_rsc -r ocf_test_rsc ' + self._action_timeout + ' -C ocf -P pacemaker -T Dummy', "ocf_reg_event": '-l "NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete"', "ocf_unreg_line": '-c unregister_rsc -r ocf_test_rsc ' + self._action_timeout, "ocf_unreg_event": '-l "NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete"', "ocf_start_line": '-c exec -r ocf_test_rsc -a start ' + self._action_timeout, "ocf_start_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete" ', "ocf_stop_line": '-c exec -r ocf_test_rsc -a stop ' + self._action_timeout, "ocf_stop_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete" ', "ocf_monitor_line": '-c exec -r ocf_test_rsc -a monitor -i 2s ' + self._action_timeout, "ocf_monitor_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, "ocf_cancel_line": '-c cancel -r ocf_test_rsc -a monitor -i 2s ' + self._action_timeout, "ocf_cancel_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled" ', "systemd_reg_line": '-c register_rsc -r systemd_test_rsc ' + self._action_timeout + ' -C systemd -T pacemaker-cts-dummyd@3', "systemd_reg_event": '-l "NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete"', "systemd_unreg_line": '-c unregister_rsc -r systemd_test_rsc ' + self._action_timeout, "systemd_unreg_event": '-l "NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete"', "systemd_start_line": '-c exec -r systemd_test_rsc -a start ' + self._action_timeout, "systemd_start_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete" ', "systemd_stop_line": '-c exec -r systemd_test_rsc -a stop ' + self._action_timeout, "systemd_stop_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete" ', "systemd_monitor_line": '-c exec -r systemd_test_rsc -a monitor -i 2s ' + self._action_timeout, "systemd_monitor_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete" -t 15000 ', "systemd_cancel_line": '-c cancel -r systemd_test_rsc -a monitor -i 2s ' + self._action_timeout, "systemd_cancel_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled" ', - "upstart_reg_line": '-c register_rsc -r upstart_test_rsc ' + self._action_timeout + ' -C upstart -T pacemaker-cts-dummyd', - "upstart_reg_event": '-l "NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete"', - "upstart_unreg_line": '-c unregister_rsc -r upstart_test_rsc ' + self._action_timeout, - "upstart_unreg_event": '-l "NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete"', - "upstart_start_line": '-c exec -r upstart_test_rsc 
-a start ' + self._action_timeout, - "upstart_start_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:start rc:ok op_status:complete" ', - "upstart_stop_line": '-c exec -r upstart_test_rsc -a stop ' + self._action_timeout, - "upstart_stop_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete" ', - "upstart_monitor_line": '-c exec -r upstart_test_rsc -a monitor -i 2s ' + self._action_timeout, - "upstart_monitor_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete" -t 15000', - "upstart_cancel_line": '-c cancel -r upstart_test_rsc -a monitor -i 2s ' + self._action_timeout, - "upstart_cancel_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled" ', - "service_reg_line": '-c register_rsc -r service_test_rsc ' + self._action_timeout + ' -C service -T %s' % service_agent, "service_reg_event": '-l "NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete"', "service_unreg_line": '-c unregister_rsc -r service_test_rsc ' + self._action_timeout, "service_unreg_event": '-l "NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete"', "service_start_line": '-c exec -r service_test_rsc -a start ' + self._action_timeout, "service_start_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete" ', "service_stop_line": '-c exec -r service_test_rsc -a stop ' + self._action_timeout, "service_stop_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete" ', "service_monitor_line": '-c exec -r service_test_rsc -a monitor -i 2s ' + self._action_timeout, "service_monitor_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, "service_cancel_line": '-c cancel -r service_test_rsc -a monitor -i 2s ' + self._action_timeout, "service_cancel_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled" ', "lsb_reg_line": '-c register_rsc -r lsb_test_rsc ' + self._action_timeout + ' -C lsb -T LSBDummy', "lsb_reg_event": '-l "NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete" ', "lsb_unreg_line": '-c unregister_rsc -r lsb_test_rsc ' + self._action_timeout, "lsb_unreg_event": '-l "NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete"', "lsb_start_line": '-c exec -r lsb_test_rsc -a start ' + self._action_timeout, "lsb_start_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete" ', "lsb_stop_line": '-c exec -r lsb_test_rsc -a stop ' + self._action_timeout, "lsb_stop_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete" ', "lsb_monitor_line": '-c exec -r lsb_test_rsc -a status -i 2s ' + self._action_timeout, "lsb_monitor_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete" ' + self._action_timeout, "lsb_cancel_line": '-c cancel -r lsb_test_rsc -a status -i 2s ' + self._action_timeout, "lsb_cancel_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled" ', "stonith_reg_line": '-c register_rsc -r stonith_test_rsc ' + self._action_timeout + ' -C 
stonith -P pacemaker -T fence_dummy', "stonith_reg_event": '-l "NEW_EVENT event_type:register rsc_id:stonith_test_rsc action:none rc:ok op_status:complete" ', "stonith_unreg_line": '-c unregister_rsc -r stonith_test_rsc ' + self._action_timeout, "stonith_unreg_event": '-l "NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete"', "stonith_start_line": '-c exec -r stonith_test_rsc -a start ' + self._action_timeout, "stonith_start_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete" ', "stonith_stop_line": '-c exec -r stonith_test_rsc -a stop ' + self._action_timeout, "stonith_stop_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete" ', "stonith_monitor_line": '-c exec -r stonith_test_rsc -a monitor -i 2s ' + self._action_timeout, "stonith_monitor_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, "stonith_cancel_line": '-c cancel -r stonith_test_rsc -a monitor -i 2s ' + self._action_timeout, "stonith_cancel_event": '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled" ', } def _setup_rsc_classes(self): """Determine which resource classes are supported.""" classes = stdout_from_command(["crm_resource", "--list-standards"]) # Strip trailing empty line classes = classes[:-1] if self.tls: classes.remove("stonith") if "nagios" in classes: classes.remove("nagios") if "systemd" in classes: try: # This code doesn't need this import, but pacemaker-cts-dummyd # does, so ensure the dependency is available rather than cause # all systemd tests to fail. # pylint: disable=import-outside-toplevel,unused-import import systemd.daemon except ImportError: print("Python systemd bindings not found.") print("The tests for systemd class are not going to be run.") classes.remove("systemd") return classes def new_test(self, name, description): """Create a named test.""" test = ExecTest(name, description, verbose=self.verbose, tls=self.tls, timeout=self.timeout, force_wait=self.force_wait, logdir=self.logdir) self._tests.append(test) return test def setup_environment(self): """Prepare the host before executing any tests.""" if BuildOptions.REMOTE_ENABLED: os.system("service pacemaker_remote stop") self.cleanup_environment() if self.tls and not os.path.isfile("/etc/pacemaker/authkey"): print("Installing /etc/pacemaker/authkey ...") os.system("mkdir -p /etc/pacemaker") os.system("dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1") self._installed_files.append("/etc/pacemaker/authkey") # If we're in build directory, install agents if not already installed # pylint: disable=protected-access if os.path.exists("%s/cts/cts-exec.in" % BuildOptions._BUILD_DIR): if not os.path.exists("%s/pacemaker" % BuildOptions.OCF_RA_INSTALL_DIR): # @TODO remember which components were created and remove them os.makedirs("%s/pacemaker" % BuildOptions.OCF_RA_INSTALL_DIR, 0o755) for agent in ["Dummy", "Stateful", "ping"]: agent_source = "%s/extra/resources/%s" % (BuildOptions._BUILD_DIR, agent) agent_dest = "%s/pacemaker/%s" % (BuildOptions.OCF_RA_INSTALL_DIR, agent) if not os.path.exists(agent_dest): print("Installing %s ..." 
% agent_dest) shutil.copyfile(agent_source, agent_dest) os.chmod(agent_dest, EXECMODE) self._installed_files.append(agent_dest) subprocess.call(["cts-support", "install"]) def cleanup_environment(self): """Clean up the host after executing desired tests.""" for installed_file in self._installed_files: print("Removing %s ..." % installed_file) os.remove(installed_file) subprocess.call(["cts-support", "uninstall"]) def _build_cmd_str(self, rsc, ty): """Construct a command string for the given resource and type.""" return "%s %s" % (self._common_cmds["%s_%s_line" % (rsc, ty)], self._common_cmds["%s_%s_event" % (rsc, ty)]) def build_generic_tests(self): """Register tests that apply to all resource classes.""" common_cmds = self._common_cmds # register/unregister tests for rsc in self._rsc_classes: test = self.new_test("generic_registration_%s" % rsc, "Simple resource registration test for %s standard" % rsc) test.add_cmd(args=self._build_cmd_str(rsc, "reg")) test.add_cmd(args=self._build_cmd_str(rsc, "unreg")) # start/stop tests for rsc in self._rsc_classes: test = self.new_test("generic_start_stop_%s" % rsc, "Simple start and stop test for %s standard" % rsc) test.add_cmd(args=self._build_cmd_str(rsc, "reg")) test.add_cmd(args=self._build_cmd_str(rsc, "start")) test.add_cmd(args=self._build_cmd_str(rsc, "stop")) test.add_cmd(args=self._build_cmd_str(rsc, "unreg")) # monitor cancel test for rsc in self._rsc_classes: test = self.new_test("generic_monitor_cancel_%s" % rsc, "Simple monitor cancel test for %s standard" % rsc) test.add_cmd(args=self._build_cmd_str(rsc, "reg")) test.add_cmd(args=self._build_cmd_str(rsc, "start")) test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) test.add_cmd(args=self._build_cmd_str(rsc, "cancel")) # If this happens the monitor did not actually cancel correctly test.add_cmd(args=common_cmds["%s_monitor_event" % rsc], expected_exitcode=ExitStatus.TIMEOUT) # If this happens the monitor did not actually cancel correctly test.add_cmd(args=common_cmds["%s_monitor_event" % rsc], expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args=self._build_cmd_str(rsc, "stop")) test.add_cmd(args=self._build_cmd_str(rsc, "unreg")) # monitor duplicate test for rsc in self._rsc_classes: test = self.new_test("generic_monitor_duplicate_%s" % rsc, "Test creation and canceling of duplicate monitors for %s standard" % rsc) test.add_cmd(args=self._build_cmd_str(rsc, "reg")) test.add_cmd(args=self._build_cmd_str(rsc, "start")) test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) # Add the duplicate monitors test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) # verify we still get update events # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) # cancel the monitor, if the duplicate merged with the original, we should no longer see monitor 
updates test.add_cmd(args=self._build_cmd_str(rsc, "cancel")) # If this happens the monitor did not actually cancel correctly test.add_cmd(args=common_cmds["%s_monitor_event" % rsc], expected_exitcode=ExitStatus.TIMEOUT) # If this happens the monitor did not actually cancel correctly test.add_cmd(args=common_cmds["%s_monitor_event" % rsc], expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args=self._build_cmd_str(rsc, "stop")) test.add_cmd(args=self._build_cmd_str(rsc, "unreg")) # stop implies cancel test for rsc in self._rsc_classes: test = self.new_test("generic_stop_implies_cancel_%s" % rsc, "Verify stopping a resource implies cancel of recurring ops for %s standard" % rsc) test.add_cmd(args=self._build_cmd_str(rsc, "reg")) test.add_cmd(args=self._build_cmd_str(rsc, "start")) test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) # If this fails, that means the monitor may not be getting rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) test.add_cmd(args=self._build_cmd_str(rsc, "stop")) # If this happens the monitor did not actually cancel correctly test.add_cmd(args=common_cmds["%s_monitor_event" % rsc], expected_exitcode=ExitStatus.TIMEOUT) # If this happens the monitor did not actually cancel correctly test.add_cmd(args=common_cmds["%s_monitor_event" % rsc], expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args=self._build_cmd_str(rsc, "unreg")) def build_multi_rsc_tests(self): """Register complex tests that involve managing multiple resouces of different types.""" common_cmds = self._common_cmds # do not use service and systemd at the same time, it is the same resource. # register start monitor stop unregister resources of each type at the same time test = self.new_test("multi_rsc_start_stop_all_including_stonith", "Start, monitor, and stop resources of multiple types and classes") for rsc in self._rsc_classes: test.add_cmd(args=self._build_cmd_str(rsc, "reg")) for rsc in self._rsc_classes: test.add_cmd(args=self._build_cmd_str(rsc, "start")) for rsc in self._rsc_classes: test.add_cmd(args=self._build_cmd_str(rsc, "monitor")) for rsc in self._rsc_classes: # If this fails, that means the monitor is not being rescheduled test.add_cmd(args=common_cmds["%s_monitor_event" % rsc]) for rsc in self._rsc_classes: test.add_cmd(args=self._build_cmd_str(rsc, "cancel")) for rsc in self._rsc_classes: test.add_cmd(args=self._build_cmd_str(rsc, "stop")) for rsc in self._rsc_classes: test.add_cmd(args=self._build_cmd_str(rsc, "unreg")) def build_negative_tests(self): """Register tests related to how pacemaker-execd handles failures.""" # ocf start timeout test test = self.new_test("ocf_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') # -t must be less than self._action_timeout test.add_cmd(args='-c exec -r test_rsc -a start -k op_sleep -v 5 -t 1000 -w') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:Timed Out" ' + self._action_timeout) test.add_cmd(args='-c exec -r test_rsc -a stop ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete" ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + 
'-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # stonith start timeout test test = self.new_test("stonith_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd(args='-c register_rsc -r test_rsc -C stonith -P pacemaker -T fence_dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete"') # -t must be less than self._action_timeout test.add_cmd(args='-c exec -r test_rsc -a start -k monitor_delay -v 30 -t 1000 -w') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:Timed Out" ' + self._action_timeout) test.add_cmd(args='-c exec -r test_rsc -a stop ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete" ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # stonith component fail test = self.new_test("stonith_component_fail", "Kill stonith component after pacemaker-execd connects") test.add_cmd(args=self._build_cmd_str("stonith", "reg")) test.add_cmd(args=self._build_cmd_str("stonith", "start")) test.add_cmd(args='-c exec -r stonith_test_rsc -a monitor -i 600s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:error op_status:error" -t 15000', kill="killall -9 -q pacemaker-fenced lt-pacemaker-fenced") test.add_cmd(args=self._build_cmd_str("stonith", "unreg")) # monitor fail for ocf resources test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self._action_timeout) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self._action_timeout) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self._action_timeout, kill="rm -f %s/run/Dummy-test_rsc.state" % BuildOptions.LOCAL_STATE_DIR) test.add_cmd(args='-c cancel -r test_rsc -a monitor -i 1s ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc 
action:monitor rc:ok op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # verify notify changes only for monitor operation test = self.new_test("monitor_changes_only", "Verify when flag is set, only monitor changes are notified.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + ' -o ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + ' -o -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self._action_timeout, kill='rm -f %s/run/Dummy-test_rsc.state' % BuildOptions.LOCAL_STATE_DIR) test.add_cmd(args='-c cancel -r test_rsc -a monitor -i 1s' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete"') # monitor fail for systemd resource if "systemd" in self._rsc_classes: test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported..") test.add_cmd(args='-c register_rsc -r test_rsc -C systemd -T pacemaker-cts-dummyd@3 ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self._action_timeout, kill="pkill -9 -f pacemaker-cts-dummyd") test.add_cmd(args='-c cancel -r test_rsc -a 
monitor -i 1s' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') - # monitor fail for upstart resource - if "upstart" in self._rsc_classes: - test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported") - test.add_cmd(args='-c register_rsc -r test_rsc -C upstart -T pacemaker-cts-dummyd ' + self._action_timeout - + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') - test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout - + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') - test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout - + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') - test.add_cmd(args='-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout - + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') - test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) - test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) - test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self._action_timeout, - kill='killall -9 -q dd') - test.add_cmd(args='-c cancel -r test_rsc -a monitor -i 1s' + self._action_timeout - + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled" ') - test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self._action_timeout, - expected_exitcode=ExitStatus.TIMEOUT) - test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, - expected_exitcode=ExitStatus.TIMEOUT) - test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout - + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') - # Cancel non-existent operation on a resource test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') 
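As an editorial aside for readers tracing these tests: each args string is handed to cts-exec-helper, where (as used throughout this file) -c selects the command, -r the resource ID, -a the action, -i the recurring interval, -t the timeout in milliseconds (the harness default above is " -t 9000 "), and -l the NEW_EVENT line the test expects to see back. A hedged sketch of what one register/start pair amounts to (the option strings are copied from the tests above; invoking the helper by hand outside the harness is an assumption):

# Sketch only: option strings copied from the tests above; running the helper
# directly like this, outside the harness and its daemon setup, is an assumption.
cts-exec-helper -c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy -t 9000 \
    -l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete"
cts-exec-helper -c exec -r test_rsc -a start -t 9000 \
    -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete"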
test.add_cmd(args='-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout) # interval is wrong, should fail test.add_cmd(args='-c cancel -r test_rsc -a monitor -i 2s' + self._action_timeout + ' -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled" ', expected_exitcode=ExitStatus.ERROR) # action name is wrong, should fail test.add_cmd(args='-c cancel -r test_rsc -a stop -i 1s' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # Attempt to invoke non-existent rsc id test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.") test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:complete" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c exec -r test_rsc -a stop ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c exec -r test_rsc -a monitor -i 6s ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c cancel -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # Register and start a resource that doesn't exist, systemd if "systemd" in self._rsc_classes: test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure") test.add_cmd(args='-c register_rsc -r test_rsc -C systemd -T this_is_fake1234 ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed" ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') - if "upstart" in self._rsc_classes: - test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure") - test.add_cmd(args='-c register_rsc -r test_rsc -C upstart -T this_is_fake1234 ' + self._action_timeout - + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') - test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout - + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed" ') - test.add_cmd(args='-c 
unregister_rsc -r test_rsc ' + self._action_timeout - + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') - # Register and start a resource that doesn't exist, ocf test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pacemaker -T this_is_fake1234 ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed" ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # Register ocf with non-existent provider test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pancakes -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed" ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # Register ocf with empty provider field test = self.new_test("start_ocf_no_provider", "Register ocf agent with a no provider, verify expected failure.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error" ', expected_exitcode=ExitStatus.ERROR) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') def build_stress_tests(self): """Register stress tests.""" timeout = "-t 20000" iterations = 25 test = self.new_test("ocf_stress", "Verify OCF agent handling works under load") for i in range(iterations): test.add_cmd(args='-c register_rsc -r rsc_%s %s -C ocf -P heartbeat -T Dummy -l "NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete"' % (i, timeout, i)) test.add_cmd(args='-c exec -r rsc_%s -a start %s -l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete"' % (i, timeout, i)) test.add_cmd(args='-c exec -r rsc_%s -a monitor %s -i 1s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete"' % (i, timeout, i)) for i in range(iterations): test.add_cmd(args='-c exec -r rsc_%s -a stop %s -l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete"' % (i, timeout, i)) test.add_cmd(args='-c unregister_rsc -r rsc_%s %s -l "NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete"' % (i, timeout, i)) if "systemd" in self._rsc_classes: test = self.new_test("systemd_stress", "Verify systemd dbus connection works under load") 
for i in range(iterations): test.add_cmd(args='-c register_rsc -r rsc_%s %s -C systemd -T pacemaker-cts-dummyd@3 -l "NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete"' % (i, timeout, i)) test.add_cmd(args='-c exec -r rsc_%s -a start %s -l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete"' % (i, timeout, i)) test.add_cmd(args='-c exec -r rsc_%s -a monitor %s -i 1s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete"' % (i, timeout, i)) for i in range(iterations): test.add_cmd(args='-c exec -r rsc_%s -a stop %s -l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete"' % (i, timeout, i)) test.add_cmd(args='-c unregister_rsc -r rsc_%s %s -l "NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete"' % (i, timeout, i)) iterations = 9 timeout = "-t 30000" # Verify recurring op in-flight collision is handled in series properly test = self.new_test("rsc_inflight_collision", "Verify recurring ops do not collide with other operations for the same rsc.") test.add_cmd(args='-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy ' '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-c exec -r test_rsc -a start %s -k op_sleep -v 1 -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete"' % timeout) for i in range(iterations): test.add_cmd(args='-c exec -r test_rsc -a monitor %s -i 100%dms -k op_sleep -v 2 ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' % (timeout, i)) test.add_cmd(args='-c exec -r test_rsc -a stop %s -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete"' % timeout) test.add_cmd(args='-c unregister_rsc -r test_rsc %s -l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete"' % timeout) def build_custom_tests(self): """Register tests that target specific cases.""" # verify resource temporary folder is created and used by OCF agents test = self.new_test("rsc_tmp_dir", "Verify creation and use of rsc temporary state directory") test.add_cmd("ls", args="-al %s" % BuildOptions.RSC_TMP_DIR) test.add_cmd(args='-c register_rsc -r test_rsc -P heartbeat -C ocf -T Dummy ' '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-c exec -r test_rsc -a start -t 4000') test.add_cmd("ls", args="-al %s" % BuildOptions.RSC_TMP_DIR) test.add_cmd("ls", args="%s/Dummy-test_rsc.state" % BuildOptions.RSC_TMP_DIR) test.add_cmd(args='-c exec -r test_rsc -a stop -t 4000') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # start delay then stop test test = self.new_test("start_delay", "Verify start delay works as expected.") test.add_cmd(args='-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy ' '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-c exec -r test_rsc -s 6000 -a start -w -t 6000') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" -t 2000', expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete 
rsc_id:test_rsc action:start rc:ok op_status:complete" -t 6000') test.add_cmd(args='-c exec -r test_rsc -a stop ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete" ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # start delay, but cancel before it gets a chance to start test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.") test.add_cmd(args='-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy ' '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ' + self._action_timeout) test.add_cmd(args='-c exec -r test_rsc -s 5000 -a start -w -t 4000') test.add_cmd(args='-c cancel -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled" ') test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" -t 5000', expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # Register a bunch of resources, verify we can get info on them test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.") if "systemd" in self._rsc_classes: test.add_cmd(args='-c register_rsc -r rsc1 -C systemd -T pacemaker-cts-dummyd@3 ' + self._action_timeout) test.add_cmd(args='-c get_rsc_info -r rsc1 ') test.add_cmd(args='-c unregister_rsc -r rsc1 ' + self._action_timeout) test.add_cmd(args='-c get_rsc_info -r rsc1 ', expected_exitcode=ExitStatus.ERROR) - if "upstart" in self._rsc_classes: - test.add_cmd(args='-c register_rsc -r rsc1 -C upstart -T pacemaker-cts-dummyd ' + self._action_timeout) - test.add_cmd(args='-c get_rsc_info -r rsc1 ') - test.add_cmd(args='-c unregister_rsc -r rsc1 ' + self._action_timeout) - test.add_cmd(args='-c get_rsc_info -r rsc1 ', expected_exitcode=ExitStatus.ERROR) - test.add_cmd(args='-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker ' + self._action_timeout) test.add_cmd(args='-c get_rsc_info -r rsc2 ') test.add_cmd(args='-c unregister_rsc -r rsc2 ' + self._action_timeout) test.add_cmd(args='-c get_rsc_info -r rsc2 ', expected_exitcode=ExitStatus.ERROR) # Register duplicate, verify only one entry exists and can still be removed test = self.new_test("duplicate_registration", "Register resource multiple times, verify only one entry exists and can be removed.") test.add_cmd(args='-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker ' + self._action_timeout) test.add_cmd(args="-c get_rsc_info -r rsc2 ", stdout_match="id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd(args='-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker ' + self._action_timeout) test.add_cmd(args="-c get_rsc_info -r rsc2 ", stdout_match="id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd(args='-c register_rsc -r rsc2 -C ocf -T Stateful -P pacemaker ' + self._action_timeout) test.add_cmd(args="-c get_rsc_info -r rsc2 ", stdout_match="id:rsc2 class:ocf provider:pacemaker type:Stateful") test.add_cmd(args='-c unregister_rsc -r rsc2 ' + self._action_timeout) test.add_cmd(args='-c get_rsc_info -r rsc2 ', expected_exitcode=ExitStatus.ERROR) # verify the option to only send notification to 
the original client test = self.new_test("notify_orig_client_only", "Verify option to only send notifications to the client originating the action.") test.add_cmd(args='-c register_rsc -r test_rsc -C ocf -P pacemaker -T Dummy ' + self._action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a start ' + self._action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete" ') test.add_cmd(args='-c exec -r test_rsc -a monitor -i 1s ' + self._action_timeout + ' -n ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') # this will fail because the monitor notifications should only go to the original caller, which no longer exists. test.add_cmd(args='-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ' + self._action_timeout, expected_exitcode=ExitStatus.TIMEOUT) test.add_cmd(args='-c cancel -r test_rsc -a monitor -i 1s -t 6000 ') test.add_cmd(args='-c unregister_rsc -r test_rsc ' + self._action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete" ') # get metadata test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource") test.add_cmd(args="-c metadata -C ocf -P pacemaker -T Dummy", stdout_match="resource-agent name=\"Dummy\"") test.add_cmd(args="-c metadata -C ocf -P pacemaker -T Stateful") test.add_cmd(args="-c metadata -P pacemaker -T Stateful", expected_exitcode=ExitStatus.ERROR) test.add_cmd(args="-c metadata -C ocf -P pacemaker -T fake_agent", expected_exitcode=ExitStatus.ERROR) # get stonith metadata test = self.new_test("get_stonith_metadata", "Retrieve stonith metadata for a resource") test.add_cmd(args="-c metadata -C stonith -P pacemaker -T fence_dummy", stdout_match="resource-agent name=\"fence_dummy\"") # get lsb metadata if "lsb" in self._rsc_classes: test = self.new_test("get_lsb_metadata", "Retrieve metadata for an LSB resource") test.add_cmd(args="-c metadata -C lsb -T LSBDummy", stdout_match="resource-agent name='LSBDummy'") # get metadata if "systemd" in self._rsc_classes: test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource") test.add_cmd(args="-c metadata -C systemd -T pacemaker-cts-dummyd@", stdout_match="resource-agent name=\"pacemaker-cts-dummyd@\"") - # get metadata - if "upstart" in self._rsc_classes: - test = self.new_test("get_upstart_metadata", "Retrieve metadata for a resource") - test.add_cmd(args="-c metadata -C upstart -T pacemaker-cts-dummyd", - stdout_match="resource-agent name=\"pacemaker-cts-dummyd\"") - # get ocf providers test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.") test.add_cmd(args="-c list_ocf_providers ", stdout_match="pacemaker") test.add_cmd(args="-c list_ocf_providers -T ping", stdout_match="pacemaker") # Verify agents only exist in their lists test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.") if "ocf" in self._rsc_classes: test.add_cmd(args="-c list_agents ", stdout_match="Stateful") test.add_cmd(args="-c list_agents -C ocf", stdout_match="Stateful", stdout_no_match="pacemaker-cts-dummyd@|fence_dummy") if "service" in self._rsc_classes: test.add_cmd(args="-c list_agents -C service", stdout_match="", stdout_no_match="Stateful|fence_dummy") if "lsb" in self._rsc_classes: test.add_cmd(args="-c list_agents", 
stdout_match="LSBDummy") test.add_cmd(args="-c list_agents -C lsb", stdout_match="LSBDummy", stdout_no_match="pacemaker-cts-dummyd@|Stateful|fence_dummy") test.add_cmd(args="-c list_agents -C service", stdout_match="LSBDummy") if "systemd" in self._rsc_classes: test.add_cmd(args="-c list_agents ", stdout_match="pacemaker-cts-dummyd@") # systemd test.add_cmd(args="-c list_agents -C systemd", stdout_match="", stdout_no_match="Stateful") # should not exist test.add_cmd(args="-c list_agents -C systemd", stdout_match="pacemaker-cts-dummyd@") test.add_cmd(args="-c list_agents -C systemd", stdout_match="", stdout_no_match="fence_dummy") # should not exist - if "upstart" in self._rsc_classes: - test.add_cmd(args="-c list_agents ", stdout_match="pacemaker-cts-dummyd") # upstart - test.add_cmd(args="-c list_agents -C upstart", stdout_match="", stdout_no_match="Stateful") # should not exist - test.add_cmd(args="-c list_agents -C upstart", stdout_match="pacemaker-cts-dummyd") - test.add_cmd(args="-c list_agents -C upstart", stdout_match="", stdout_no_match="fence_dummy") # should not exist - if "stonith" in self._rsc_classes: test.add_cmd(args="-c list_agents -C stonith", stdout_match="fence_dummy") # stonith test.add_cmd(args="-c list_agents -C stonith", stdout_match="", # should not exist stdout_no_match="pacemaker-cts-dummyd@") test.add_cmd(args="-c list_agents -C stonith", stdout_match="", stdout_no_match="Stateful") # should not exist test.add_cmd(args="-c list_agents ", stdout_match="fence_dummy") def build_options(): """Handle command line arguments.""" parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="Run pacemaker-execd regression tests", epilog="Example: Run only the test 'start_stop'\n" "\t " + sys.argv[0] + " --run-only start_stop\n\n" "Example: Run only the tests with the string 'systemd' present in them\n" "\t " + sys.argv[0] + " --run-only-pattern systemd") parser.add_argument("-l", "--list-tests", action="store_true", help="Print out all registered tests") parser.add_argument("-p", "--run-only-pattern", metavar='PATTERN', help="Run only tests matching the given pattern") parser.add_argument("-r", "--run-only", metavar='TEST', help="Run a specific test") parser.add_argument("-t", "--timeout", type=float, default=2, help="Up to how many seconds each test case waits for the daemon to " "be initialized. Defaults to 2. 
The value 0 means no limit.") parser.add_argument("-w", "--force-wait", action="store_true", help="Each test case waits the default/specified --timeout for the " "daemon without tracking the log") if BuildOptions.REMOTE_ENABLED: parser.add_argument("-R", "--pacemaker-remote", action="store_true", help="Test pacemaker-remoted binary instead of pacemaker-execd") parser.add_argument("-V", "--verbose", action="store_true", help="Verbose output") args = parser.parse_args() return args def main(): """Run pacemaker-execd regression tests as specified by arguments.""" update_path() # Ensure all command output is in portable locale for comparison os.environ['LC_ALL'] = "C" opts = build_options() if opts.pacemaker_remote: exit_if_proc_running("pacemaker-remoted") else: exit_if_proc_running("corosync") exit_if_proc_running("pacemaker-execd") exit_if_proc_running("pacemaker-fenced") # Create a temporary directory for log files (the directory will # automatically be erased when done) with tempfile.TemporaryDirectory(prefix="cts-exec-") as logdir: tests = ExecTests(verbose=opts.verbose, tls=opts.pacemaker_remote, timeout=opts.timeout, force_wait=opts.force_wait, logdir=logdir) tests.build_generic_tests() tests.build_multi_rsc_tests() tests.build_negative_tests() tests.build_custom_tests() tests.build_stress_tests() if opts.list_tests: tests.print_list() sys.exit(ExitStatus.OK) print("Starting ...") tests.setup_environment() if opts.run_only_pattern: tests.run_tests_matching(opts.run_only_pattern) tests.print_results() elif opts.run_only: tests.run_single(opts.run_only) tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_environment() tests.exit() if __name__ == "__main__": main() diff --git a/cts/support/Makefile.am b/cts/support/Makefile.am index 17351171d5..8071711a9f 100644 --- a/cts/support/Makefile.am +++ b/cts/support/Makefile.am @@ -1,28 +1,25 @@ # # Copyright 2021-2024 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/mk/python.mk MAINTAINERCLEANFILES = Makefile.in # Commands intended to be run only via other commands halibdir = $(CRM_DAEMON_DIR) dist_halib_SCRIPTS = cts-support ctsdir = $(datadir)/$(PACKAGE)/tests/cts cts_DATA = pacemaker-cts-dummyd@.service dist_cts_DATA = cts.conf -if BUILD_UPSTART -dist_cts_DATA += pacemaker-cts-dummyd.conf -endif cts_SCRIPTS = fence_dummy \ LSBDummy \ pacemaker-cts-dummyd PYCHECKFILES ?= cts-support fence_dummy pacemaker-cts-dummyd diff --git a/cts/support/pacemaker-cts-dummyd.conf b/cts/support/pacemaker-cts-dummyd.conf deleted file mode 100644 index 3ecb1acd86..0000000000 --- a/cts/support/pacemaker-cts-dummyd.conf +++ /dev/null @@ -1,2 +0,0 @@ -description "Dummy Upstart service for Pacemaker's Cluster Test Suite" -exec dd if=/dev/random of=/dev/null diff --git a/daemons/pacemakerd/pacemaker.combined.upstart.in b/daemons/pacemakerd/pacemaker.combined.upstart.in deleted file mode 100644 index af59ff05d1..0000000000 --- a/daemons/pacemakerd/pacemaker.combined.upstart.in +++ /dev/null @@ -1,67 +0,0 @@ -# pacemaker-corosync - High-Availability cluster -# -# Starts Corosync cluster engine and Pacemaker cluster manager. - -# if you use automatic start, uncomment the line below. 
-#start on started local and runlevel [2345] - -stop on runlevel [0123456] -kill timeout 3600 -respawn - -env prog=pacemakerd -env sysconf=@CONFIGDIR@/pacemaker -env rpm_lockdir=@localstatedir@/lock/subsys -env deb_lockdir=@localstatedir@/lock - -script - [ -f "$sysconf" ] && . "$sysconf" - exec $prog -end script - -pre-start script - pidof corosync || start corosync - - # if you use corosync-notifyd, uncomment the line below. - #start corosync-notifyd - - # give it time to fail. - sleep 2 - pidof corosync || { exit 1; } - - # if you use crm_mon, uncomment the line below. - #start crm_mon -end script - -post-start script - [ -f "$sysconf" ] && . "$sysconf" - [ -z "$LOCK_FILE" -a -d "$rpm_lockdir" ] && LOCK_FILE="$rpm_lockdir/pacemaker" - [ -z "$LOCK_FILE" -a -d "$deb_lockdir" ] && LOCK_FILE="$deb_lockdir/pacemaker" - touch "$LOCK_FILE" - pidof $prog > "@localstatedir@/run/$prog.pid" -end script - -post-stop script - [ -f "$sysconf" ] && . "$sysconf" - [ -z "$LOCK_FILE" -a -d "$rpm_lockdir" ] && LOCK_FILE="$rpm_lockdir/pacemaker" - [ -z "$LOCK_FILE" -a -d "$deb_lockdir" ] && LOCK_FILE="$deb_lockdir/pacemaker" - rm -f "$LOCK_FILE" - rm -f "@localstatedir@/run/$prog.pid" - - # if you use corosync-notifyd, uncomment the line below. - #stop corosync-notifyd || true - - # if you use watchdog of corosync, uncomment the line below. - #pidof corosync || false - - pidof pacemaker-controld || stop corosync - - # if you want to reboot a machine by watchdog of corosync when - # pacemakerd disappeared unexpectedly, uncomment the line below - # and invalidate above "respawn" stanza. - #pidof pacemaker-controld && killall -q -9 corosync - - # if you use crm_mon, uncomment the line below. - #stop crm_mon - -end script diff --git a/daemons/pacemakerd/pacemaker.upstart.in b/daemons/pacemakerd/pacemaker.upstart.in deleted file mode 100644 index 7a54bc0ea8..0000000000 --- a/daemons/pacemakerd/pacemaker.upstart.in +++ /dev/null @@ -1,33 +0,0 @@ -# pacemaker - High-Availability cluster resource manager -# -# Starts pacemakerd - -stop on runlevel [0123456] -kill timeout 3600 -respawn - -env prog=pacemakerd -env sysconf=@CONFIGDIR@/pacemaker -env rpm_lockdir=@localstatedir@/lock/subsys -env deb_lockdir=@localstatedir@/lock - -script - [ -f "$sysconf" ] && . "$sysconf" - exec $prog -end script - -post-start script - [ -f "$sysconf" ] && . "$sysconf" - [ -z "$LOCK_FILE" -a -d "$rpm_lockdir" ] && LOCK_FILE="$rpm_lockdir/pacemaker" - [ -z "$LOCK_FILE" -a -d "$deb_lockdir" ] && LOCK_FILE="$deb_lockdir/pacemaker" - touch "$LOCK_FILE" - pidof $prog > "@localstatedir@/run/$prog.pid" -end script - -post-stop script - [ -f "$sysconf" ] && . "$sysconf" - [ -z "$LOCK_FILE" -a -d "$rpm_lockdir" ] && LOCK_FILE="$rpm_lockdir/pacemaker" - [ -z "$LOCK_FILE" -a -d "$deb_lockdir" ] && LOCK_FILE="$deb_lockdir/pacemaker" - rm -f "$LOCK_FILE" - rm -f "@localstatedir@/run/$prog.pid" -end script diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst index b0dca49670..053688ae0b 100644 --- a/doc/sphinx/Pacemaker_Explained/resources.rst +++ b/doc/sphinx/Pacemaker_Explained/resources.rst @@ -1,912 +1,879 @@ .. _resource: Resources --------- .. _s-resource-primitive: .. index:: single: resource A *resource* is a service managed by Pacemaker. The simplest type of resource, a *primitive*, is described in this chapter. More complex forms, such as groups and clones, are described in later chapters. 
Every primitive has a *resource agent* that provides Pacemaker a standardized interface for managing the service. This allows Pacemaker to be agnostic about the services it manages. Pacemaker doesn't need to understand how the service works because it relies on the resource agent to do the right thing when asked. Every resource has a *standard* (also called *class*) specifying the interface that its resource agent follows, and a *type* identifying the specific service being managed. .. _s-resource-supported: .. index:: single: resource; standard Resource Standards ################## Pacemaker can use resource agents complying with these standards, described in more detail below: * ocf * lsb * systemd * service * stonithd * nagios *(deprecated since 2.1.6)* -* upstart *(deprecated since 2.1.0)* Support for some standards is controlled by build options and so might not be available in any particular build of Pacemaker. The command ``crm_resource --list-standards`` will show which standards are supported by the local build. .. index:: single: resource; OCF single: OCF; resources single: Open Cluster Framework; resources Open Cluster Framework ______________________ The Open Cluster Framework (OCF) Resource Agent API is a ClusterLabs standard for managing services. It is the most preferred since it is specifically designed for use in a Pacemaker cluster. OCF agents are scripts that support a variety of actions including ``start``, ``stop``, and ``monitor``. They may accept parameters, making them more flexible than other standards. The number and purpose of parameters is left to the agent, which advertises them via the ``meta-data`` action. Unlike other standards, OCF agents have a *provider* as well as a standard and type. For more information, see the "Resource Agents" chapter of *Pacemaker Administration* and the `OCF standard `_. .. _s-resource-supported-systemd: .. index:: single: Resource; Systemd single: Systemd; resources Systemd _______ Most Linux distributions use `Systemd `_ for system initialization and service management. *Unit files* specify how to manage services and are usually provided by the distribution. Pacemaker can manage systemd services. Simply create a resource with ``systemd`` as the resource standard and the unit file name as the resource type. Do *not* run ``systemctl enable`` on the unit. .. important:: Make sure that any systemd services to be controlled by the cluster are *not* enabled to start at boot. .. index:: single: resource; LSB single: LSB; resources single: Linux Standard Base; resources Linux Standard Base ___________________ *LSB* resource agents, also known as `SysV-style `_, are scripts that provide start, stop, and status actions for a service. They are provided by some operating system distributions. If a full path is not given, they are assumed to be located in a directory specified when your Pacemaker software was built (usually ``/etc/init.d``). In order to be used with Pacemaker, they must conform to the `LSB specification `_ as it relates to init scripts. .. warning:: Some LSB scripts do not fully comply with the standard. For details on how to check whether your script is LSB-compatible, see the "Resource Agents" chapter of `Pacemaker Administration`. Common problems include: * Not implementing the ``status`` action * Not observing the correct exit status codes * Starting a started resource returns an error * Stopping a stopped resource returns an error .. 
important:: Make sure the host is *not* configured to start any LSB services at boot that will be controlled by the cluster. .. index:: single: Resource; System Services single: System Service; resources System Services _______________ -Since there are various types of system services (``systemd``, -``upstart``, and ``lsb``), Pacemaker supports a special ``service`` alias which -intelligently figures out which one applies to a given cluster node. +Since there is more than one type of system service (``systemd`` and ``lsb``), +Pacemaker supports a special ``service`` alias which intelligently figures out +which one applies to a given cluster node. -This is particularly useful when the cluster contains a mix of -``systemd``, ``upstart``, and ``lsb``. +This is particularly useful when the cluster contains a mix of ``systemd`` and +``lsb``. -In order, Pacemaker will try to find the named service as: - -* an LSB init script -* a Systemd unit file -* an Upstart job +If the ``service`` standard is specified, Pacemaker will try to find the named +service as an LSB init script, and if none exists, a systemd unit file. .. index:: single: Resource; STONITH single: STONITH; resources STONITH _______ The ``stonith`` standard is used for managing fencing devices, discussed later in :ref:`fencing`. .. index:: single: Resource; Nagios Plugins single: Nagios Plugins; resources Nagios Plugins ______________ Nagios Plugins are a way to monitor services. Pacemaker can use these as resources, to react to a change in the service's status. To use plugins as resources, Pacemaker must have been built with support, and OCF-style meta-data for the plugins must be installed on nodes that can run them. Meta-data for several common plugins is provided by the `nagios-agents-metadata `_ project. The supported parameters for such a resource are same as the long options of the plugin. Start and monitor actions for plugin resources are implemented as invoking the plugin. A plugin result of "OK" (0) is treated as success, a result of "WARN" (1) is treated as a successful but degraded service, and any other result is considered a failure. A plugin resource is not going to change its status after recovery by restarting the plugin, so using them alone does not make sense with ``on-fail`` set (or left to default) to ``restart``. Another value could make sense, for example, if you want to fence or standby nodes that cannot reach some external service. A more common use case for plugin resources is to configure them with a ``container`` meta-attribute set to the name of another resource that actually makes the service available, such as a virtual machine or container. With ``container`` set, the plugin resource will automatically be colocated with the containing resource and ordered after it, and the containing resource will be considered failed if the plugin resource fails. This allows monitoring of a service inside a virtual machine or container, with recovery of the virtual machine or container if the service fails. .. warning:: Nagios support is deprecated in Pacemaker. Support will be dropped entirely at the next major release of Pacemaker. For monitoring a service inside a virtual machine or container, the recommended alternative is to configure the virtual machine as a guest node or the container as a :ref:`bundle `. 
For other use cases, or when the virtual machine or container image cannot be modified, the recommended alternative is to write a custom OCF agent for the service (which may even call the Nagios plugin as part of its status action). -.. index:: - single: Resource; Upstart - single: Upstart; resources - -Upstart -_______ - -Some Linux distributions previously used `Upstart -`_ for system initialization and service -management. Pacemaker is able to manage services using Upstart if the local -system supports them and support was enabled when your Pacemaker software was -built. - -The *jobs* that specify how services are managed are usually provided by the -operating system distribution. - -.. important:: - - Make sure the host is *not* configured to start any Upstart services at boot - that will be controlled by the cluster. - -.. warning:: - - Upstart support is deprecated in Pacemaker. Upstart is no longer actively - maintained, and test platforms for it are no longer readily usable. Support - will be dropped entirely at the next major release of Pacemaker. - - .. _primitive-resource: Resource Properties ################### These values tell the cluster which resource agent to use for the resource, where to find that resource agent and what standards it conforms to. .. table:: **Properties of a Primitive Resource** :widths: 1 4 +-------------+------------------------------------------------------------------+ | Field | Description | +=============+==================================================================+ | id | .. index:: | | | single: id; resource | | | single: resource; property, id | | | | | | Your name for the resource | +-------------+------------------------------------------------------------------+ | class | .. index:: | | | single: class; resource | | | single: resource; property, class | | | | | | The standard the resource agent conforms to. Allowed values: | | | ``lsb``, ``ocf``, ``service``, ``stonith``, ``systemd``, | - | | ``nagios`` *(deprecated since 2.1.6)*, and ``upstart`` | - | | *(deprecated since 2.1.0)* | + | | and ``nagios`` *(deprecated since 2.1.6)* | +-------------+------------------------------------------------------------------+ | description | .. index:: | | | single: description; resource | | | single: resource; property, description | | | | | | A description of the Resource Agent, intended for local use. | | | E.g. ``IP address for website`` | +-------------+------------------------------------------------------------------+ | type | .. index:: | | | single: type; resource | | | single: resource; property, type | | | | | | The name of the Resource Agent you wish to use. E.g. | | | ``IPaddr`` or ``Filesystem`` | +-------------+------------------------------------------------------------------+ | provider | .. index:: | | | single: provider; resource | | | single: resource; property, provider | | | | | | The OCF spec allows multiple vendors to supply the same resource | | | agent. To use the OCF resource agents supplied by the Heartbeat | | | project, you would specify ``heartbeat`` here. | +-------------+------------------------------------------------------------------+ The XML definition of a resource can be queried with the **crm_resource** tool. For example: .. code-block:: none # crm_resource --resource Email --query-xml might produce: .. topic:: A system resource definition .. code-block:: xml .. note:: - One of the main drawbacks to system services (LSB, systemd or - Upstart) resources is that they do not allow any parameters! 
+ One of the main drawbacks to system services (lsb and systemd) + is that they do not allow parameters .. topic:: An OCF resource definition .. code-block:: xml .. _resource_options: Resource Options ################ Resources have two types of options: *meta-attributes* and *instance attributes*. Meta-attributes apply to any type of resource, while instance attributes are specific to each resource agent. Resource Meta-Attributes ________________________ Meta-attributes are used by the cluster to decide how a resource should behave and can be easily set using the ``--meta`` option of the **crm_resource** command. .. list-table:: **Meta-attributes of a Primitive Resource** :class: longtable :widths: 2 2 3 5 :header-rows: 1 * - Name - Type - Default - Description * - .. _meta_priority: .. index:: single: priority; resource option single: resource; option, priority priority - :ref:`score ` - 0 - If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. * - .. _meta_critical: .. index:: single: critical; resource option single: resource; option, critical critical - :ref:`boolean ` - true - Use this value as the default for ``influence`` in all :ref:`colocation constraints ` involving this resource, as well as in the implicit colocation constraints created if this resource is in a :ref:`group `. For details, see :ref:`s-coloc-influence`. *(since 2.1.0)* * - .. _meta_target_role: .. index:: single: target-role; resource option single: resource; option, target-role target-role - :ref:`enumeration ` - Started - What state should the cluster attempt to keep this resource in? Allowed values: * ``Stopped:`` Force the resource to be stopped * ``Started:`` Allow the resource to be started (and in the case of :ref:`promotable ` clone resources, promoted if appropriate) * ``Unpromoted:`` Allow the resource to be started, but only in the unpromoted role if the resource is :ref:`promotable ` * ``Promoted:`` Equivalent to ``Started`` * - .. _meta_is_managed: .. _is_managed: .. index:: single: is-managed; resource option single: resource; option, is-managed is-managed - :ref:`boolean ` - true - If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. Maintenance mode overrides this setting. * - .. _meta_maintenance: .. _rsc_maintenance: .. index:: single: maintenance; resource option single: resource; option, maintenance maintenance - :ref:`boolean ` - false - If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying ``role`` as ``Stopped``). If true, the :ref:`maintenance-mode ` cluster option or :ref:`maintenance ` node attribute overrides this. * - .. _meta_resource_stickiness: .. _resource-stickiness: .. index:: single: resource-stickiness; resource option single: resource; option, resource-stickiness resource-stickiness - :ref:`score ` - 1 for individual clone instances, 0 for all other resources - A score that will be added to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. * - .. _meta_requires: .. _requires: .. 
index:: single: requires; resource option single: resource; option, requires requires - :ref:`enumeration ` - ``quorum`` for resources with a ``class`` of ``stonith``, otherwise ``unfencing`` if unfencing is active in the cluster, otherwise ``fencing`` if ``stonith-enabled`` is true, otherwise ``quorum`` - Conditions under which the resource can be started. Allowed values: * ``nothing:`` The cluster can always start this resource. * ``quorum:`` The cluster can start this resource only if a majority of the configured nodes are active. * ``fencing:`` The cluster can start this resource only if a majority of the configured nodes are active *and* any failed or unknown nodes have been :ref:`fenced `. * ``unfencing:`` The cluster can only start this resource if a majority of the configured nodes are active *and* any failed or unknown nodes have been fenced *and* only on nodes that have been :ref:`unfenced `. * - .. _meta_migration_threshold: .. index:: single: migration-threshold; resource option single: resource; option, migration-threshold migration-threshold - :ref:`score ` - INFINITY - How many failures may occur for this resource on a node, before this node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible); by contrast, the cluster treats ``INFINITY`` (the default) as a very large but finite number. This option has an effect only if the failed operation specifies ``on-fail`` as ``restart`` (the default), and additionally for failed ``start`` operations, if the cluster property ``start-failure-is-fatal`` is ``false``. * - .. _meta_failure_timeout: .. index:: single: failure-timeout; resource option single: resource; option, failure-timeout failure-timeout - :ref:`duration ` - 0 - How many seconds to wait before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. * - .. _meta_multiple_active: .. index:: single: multiple-active; resource option single: resource; option, multiple-active multiple-active - :ref:`enumeration ` - stop_start - What should the cluster do if it ever finds the resource active on more than one node? Allowed values: * ``block``: mark the resource as unmanaged * ``stop_only``: stop all active instances and leave them that way * ``stop_start``: stop all active instances and start the resource in one location only * ``stop_unexpected``: stop all active instances except where the resource should be active (this should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused; note that any resources ordered after this will still need to be restarted) *(since 2.1.3)* * - .. _meta_allow_migrate: .. index:: single: allow-migrate; resource option single: resource; option, allow-migrate allow-migrate - :ref:`boolean ` - true for ``ocf:pacemaker:remote`` resources, false otherwise - Whether the cluster should try to "live migrate" this resource when it needs to be moved (see :ref:`live-migration`) * - .. _meta_allow_unhealthy_nodes: .. 
index:: single: allow-unhealthy-nodes; resource option single: resource; option, allow-unhealthy-nodes allow-unhealthy-nodes - :ref:`boolean ` - false - Whether the resource should be able to run on a node even if the node's health score would otherwise prevent it (see :ref:`node-health`) *(since 2.1.3)* * - .. _meta_container_attribute_target: .. index:: single: container-attribute-target; resource option single: resource; option, container-attribute-target container-attribute-target - :ref:`enumeration ` - - Specific to bundle resources; see :ref:`s-bundle-attributes` As an example of setting resource options, if you performed the following commands on an LSB Email resource: .. code-block:: none # crm_resource --meta --resource Email --set-parameter priority --parameter-value 100 # crm_resource -m -r Email -p multiple-active -v block the resulting resource definition might be: .. topic:: An LSB resource with cluster options .. code-block:: xml In addition to the cluster-defined meta-attributes described above, you may also configure arbitrary meta-attributes of your own choosing. Most commonly, this would be done for use in :ref:`rules `. For example, an IT department might define a custom meta-attribute to indicate which company department each resource is intended for. To reduce the chance of name collisions with cluster-defined meta-attributes added in the future, it is recommended to use a unique, organization-specific prefix for such attributes. .. _s-resource-defaults: Setting Global Defaults for Resource Meta-Attributes ____________________________________________________ To set a default value for a resource option, add it to the ``rsc_defaults`` section with ``crm_attribute``. For example, .. code-block:: none # crm_attribute --type rsc_defaults --name is-managed --update false would prevent the cluster from starting or stopping any of the resources in the configuration (unless of course the individual resources were specifically enabled by having their ``is-managed`` set to ``true``). Resource Instance Attributes ____________________________ -The resource agents of some resource standards (lsb, systemd and upstart *not* -among them) can be given parameters which determine how they behave and which +The resource agents of some resource standards (lsb and systemd *not* among +them) can be given parameters which determine how they behave and which instance of a service they control. If your resource agent supports parameters, you can add them with the ``crm_resource`` command. For example, .. code-block:: none # crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2 would create an entry in the resource like this: .. topic:: An example OCF resource with instance attributes .. code-block:: xml For an OCF resource, the result would be an environment variable called ``OCF_RESKEY_ip`` with a value of ``192.0.2.2``. The list of instance attributes supported by an OCF resource agent can be found by calling the resource agent with the ``meta-data`` command. The output contains an XML description of all the supported attributes, their purpose and default values. .. topic:: Displaying the metadata for the Dummy resource agent template .. code-block:: none # export OCF_ROOT=/usr/lib/ocf # $OCF_ROOT/resource.d/pacemaker/Dummy meta-data .. code-block:: xml 1.1 This is a dummy OCF resource agent. It does absolutely nothing except keep track of whether it is running or not, and can be configured so that actions fail or take a long time. 
Its purpose is primarily for testing, and to serve as a template for resource agent writers. Example stateless resource agent Location to store the resource state in. State file Fake password field Password Fake attribute that can be changed to cause a reload Fake attribute that can be changed to cause a reload Number of seconds to sleep during operations. This can be used to test how the cluster reacts to operation timeouts. Operation sleep duration in seconds. Start, migrate_from, and reload-agent actions will return failure if running on the host specified here, but the resource will run successfully anyway (future monitor calls will find it running). This can be used to test on-fail=ignore. Report bogus start failure on specified host If this is set, the environment will be dumped to this file for every call. Environment dump file Pacemaker Remote Resources ########################## :ref:`Pacemaker Remote ` nodes are defined by resources. .. _remote_nodes: .. index:: single: node; remote single: Pacemaker Remote; remote node single: remote node Remote nodes ____________ A remote node is defined by a connection resource using the special, built-in **ocf:pacemaker:remote** resource agent. .. list-table:: **ocf:pacemaker:remote Instance Attributes** :class: longtable :widths: 2 2 3 5 :header-rows: 1 * - Name - Type - Default - Description * - .. _remote_server: .. index:: pair: remote node; server server - :ref:`text ` - resource ID - Hostname or IP address used to connect to the remote node. The remote executor on the remote node must be configured to accept connections on this address. * - .. _remote_port: .. index:: pair: remote node; port port - :ref:`port ` - 3121 - TCP port on the remote node used for its Pacemaker Remote connection. The remote executor on the remote node must be configured to listen on this port. * - .. _remote_reconnect_interval: .. index:: pair: remote node; reconnect_interval reconnect_interval - :ref:`duration ` - 0 - If positive, the cluster will attempt to reconnect to a remote node at this interval after an active connection has been lost. Otherwise, the cluster will attempt to reconnect immediately (after any fencing, if needed). .. _guest_nodes: .. index:: single: node; guest single: Pacemaker Remote; guest node single: guest node Guest Nodes ___________ When configuring a virtual machine as a guest node, the virtual machine is created using one of the usual resource agents for that purpose (for example, **ocf:heartbeat:VirtualDomain** or **ocf:heartbeat:Xen**), with additional meta-attributes. No restrictions are enforced on what agents may be used to create a guest node, but obviously the agent must create a distinct environment capable of running the remote executor and cluster resources. An additional requirement is that fencing the node hosting the guest node resource must be sufficient for ensuring the guest node is stopped. This means that not all hypervisors supported by **VirtualDomain** may be used to create guest nodes; if the guest can survive the hypervisor being fenced, it is unsuitable for use as a guest node. .. list-table:: **Guest node meta-attributes** :class: longtable :widths: 2 2 3 5 :header-rows: 1 * - Name - Type - Default - Description * - .. _meta_remote_node: .. index:: single: remote-node; resource option single: resource; option, remote-node remote-node - :ref:`text ` - - If specified, this resource defines a guest node using this node name. The guest must be configured to run the remote executor when it is started. 
This value *must not* be the same as any resource or node ID. * - .. _meta_remote_addr: .. index:: single: remote-addr; resource option single: resource; option, remote-addr remote-addr - :ref:`text ` - value of ``remote-node`` - If ``remote-node`` is specified, the hostname or IP address used to connect to the guest. The remote executor on the guest must be configured to accept connections on this address. * - .. _meta_remote_port: .. index:: single: remote-port; resource option single: resource; option, remote-port remote-port - :ref:`port ` - 3121 - If ``remote-node`` is specified, the port on the guest used for its Pacemaker Remote connection. The remote executor on the guest must be configured to listen on this port. * - .. _meta_remote_connect_timeout: .. index:: single: remote-connect-timeout; resource option single: resource; option, remote-connect-timeout remote-connect-timeout - :ref:`timeout ` - 60s - If ``remote-node`` is specified, how long before a pending guest connection will time out. * - .. _meta_remote_allow_migrate: .. index:: single: remote-allow-migrate; resource option single: resource; option, remote-allow-migrate remote-allow-migrate - :ref:`boolean ` - true - If ``remote-node`` is specified, this acts as the ``allow-migrate`` meta-attribute for its implicitly created remote connection resource (``ocf:pacemaker:remote``). Removing Pacemaker Remote Nodes _______________________________ If the resource creating a remote node connection or guest node is removed from the configuration, status output may continue to show the affected node (as offline). If you want to get rid of that output, run the following command, replacing ``$NODE_NAME`` appropriately: .. code-block:: none # crm_node --force --remove $NODE_NAME .. WARNING:: Be absolutely sure that there are no references to the node's resource in the configuration before running the above command. diff --git a/include/crm/common/logging_internal.h b/include/crm/common/logging_internal.h index 3f11d4ed8a..ec1cc291c5 100644 --- a/include/crm/common/logging_internal.h +++ b/include/crm/common/logging_internal.h @@ -1,247 +1,246 @@ /* * Copyright 2015-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_LOGGING_INTERNAL__H #define PCMK__CRM_COMMON_LOGGING_INTERNAL__H #include #include #include #ifdef __cplusplus extern "C" { #endif /* Some warnings are too noisy when logged every time a given function is called * (for example, using a deprecated feature). As an alternative, we allow * warnings to be logged once per invocation of the calling program. Each of * those warnings needs a flag defined here. 
*/ enum pcmk__warnings { pcmk__wo_blind = (1 << 0), pcmk__wo_restart_type = (1 << 1), pcmk__wo_role_after = (1 << 2), pcmk__wo_poweroff = (1 << 3), pcmk__wo_require_all = (1 << 4), pcmk__wo_order_score = (1 << 5), pcmk__wo_remove_after = (1 << 7), pcmk__wo_ping_node = (1 << 8), pcmk__wo_group_order = (1 << 11), pcmk__wo_group_coloc = (1 << 12), - pcmk__wo_upstart = (1 << 13), pcmk__wo_nagios = (1 << 14), pcmk__wo_set_ordering = (1 << 15), pcmk__wo_rdisc_enabled = (1 << 16), pcmk__wo_rkt = (1 << 17), pcmk__wo_op_attr_expr = (1 << 19), pcmk__wo_instance_defaults = (1 << 20), pcmk__wo_multiple_rules = (1 << 21), pcmk__wo_clone_master_max = (1 << 23), pcmk__wo_clone_master_node_max = (1 << 24), pcmk__wo_master_role = (1 << 26), pcmk__wo_slave_role = (1 << 27), }; /*! * \internal * \brief Log a warning once per invocation of calling program * * \param[in] wo_flag enum pcmk__warnings value for this warning * \param[in] fmt... printf(3)-style format and arguments */ #define pcmk__warn_once(wo_flag, fmt...) do { \ if (!pcmk_is_set(pcmk__warnings, wo_flag)) { \ if (wo_flag == pcmk__wo_blind) { \ crm_warn(fmt); \ } else { \ pcmk__config_warn(fmt); \ } \ pcmk__warnings = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Warn-once", "logging", \ pcmk__warnings, \ (wo_flag), #wo_flag); \ } \ } while (0) typedef void (*pcmk__config_error_func) (void *ctx, const char *msg, ...) G_GNUC_PRINTF(2, 3); typedef void (*pcmk__config_warning_func) (void *ctx, const char *msg, ...) G_GNUC_PRINTF(2, 3); extern pcmk__config_error_func pcmk__config_error_handler; extern pcmk__config_warning_func pcmk__config_warning_handler; extern void *pcmk__config_error_context; extern void *pcmk__config_warning_context; void pcmk__set_config_error_handler(pcmk__config_error_func error_handler, void *error_context); void pcmk__set_config_warning_handler(pcmk__config_warning_func warning_handler, void *warning_context); /* Pacemaker library functions set this when a configuration error is found, * which turns on extra messages at the end of processing. */ extern bool pcmk__config_has_error; /* Pacemaker library functions set this when a configuration warning is found, * which turns on extra messages at the end of processing. */ extern bool pcmk__config_has_warning; /*! * \internal * \brief Log an error and make crm_verify return failure status * * \param[in] fmt... printf(3)-style format string and arguments */ #define pcmk__config_err(fmt...) do { \ pcmk__config_has_error = true; \ if (pcmk__config_error_handler == NULL) { \ crm_err(fmt); \ } else { \ pcmk__config_error_handler(pcmk__config_error_context, fmt); \ } \ } while (0) /*! * \internal * \brief Log a warning and make crm_verify return failure status * * \param[in] fmt... printf(3)-style format string and arguments */ #define pcmk__config_warn(fmt...) do { \ pcmk__config_has_warning = true; \ if (pcmk__config_warning_handler == NULL) { \ crm_warn(fmt); \ } else { \ pcmk__config_warning_handler(pcmk__config_warning_context, fmt);\ } \ } while (0) /*! * \internal * \brief Execute code depending on whether trace logging is enabled * * This is similar to \p do_crm_log_unlikely() except instead of logging, it * selects one of two code blocks to execute. * * \param[in] if_action Code block to execute if trace logging is enabled * \param[in] else_action Code block to execute if trace logging is not enabled * * \note Neither \p if_action nor \p else_action can contain a \p break or * \p continue statement. 
*/ #define pcmk__if_tracing(if_action, else_action) do { \ static struct qb_log_callsite *trace_cs = NULL; \ \ if (trace_cs == NULL) { \ trace_cs = qb_log_callsite_get(__func__, __FILE__, \ "if_tracing", LOG_TRACE, \ __LINE__, crm_trace_nonlog); \ } \ if (crm_is_callsite_active(trace_cs, LOG_TRACE, \ crm_trace_nonlog)) { \ if_action; \ } else { \ else_action; \ } \ } while (0) /*! * \internal * \brief Log XML changes line-by-line in a formatted fashion * * \param[in] level Priority at which to log the messages * \param[in] xml XML to log * * \note This does nothing when \p level is \c LOG_STDOUT. */ #define pcmk__log_xml_changes(level, xml) do { \ uint8_t _level = pcmk__clip_log_level(level); \ static struct qb_log_callsite *xml_cs = NULL; \ \ switch (_level) { \ case LOG_STDOUT: \ case LOG_NEVER: \ break; \ default: \ if (xml_cs == NULL) { \ xml_cs = qb_log_callsite_get(__func__, __FILE__, \ "xml-changes", _level, \ __LINE__, 0); \ } \ if (crm_is_callsite_active(xml_cs, _level, 0)) { \ pcmk__log_xml_changes_as(__FILE__, __func__, __LINE__, \ 0, _level, xml); \ } \ break; \ } \ } while(0) /*! * \internal * \brief Log an XML patchset line-by-line in a formatted fashion * * \param[in] level Priority at which to log the messages * \param[in] patchset XML patchset to log * * \note This does nothing when \p level is \c LOG_STDOUT. */ #define pcmk__log_xml_patchset(level, patchset) do { \ uint8_t _level = pcmk__clip_log_level(level); \ static struct qb_log_callsite *xml_cs = NULL; \ \ switch (_level) { \ case LOG_STDOUT: \ case LOG_NEVER: \ break; \ default: \ if (xml_cs == NULL) { \ xml_cs = qb_log_callsite_get(__func__, __FILE__, \ "xml-patchset", _level, \ __LINE__, 0); \ } \ if (crm_is_callsite_active(xml_cs, _level, 0)) { \ pcmk__log_xml_patchset_as(__FILE__, __func__, __LINE__, \ 0, _level, patchset); \ } \ break; \ } \ } while(0) void pcmk__log_xml_changes_as(const char *file, const char *function, uint32_t line, uint32_t tags, uint8_t level, const xmlNode *xml); void pcmk__log_xml_patchset_as(const char *file, const char *function, uint32_t line, uint32_t tags, uint8_t level, const xmlNode *patchset); /*! * \internal * \brief Initialize logging for command line tools * * \param[in] name The name of the program * \param[in] verbosity How verbose to be in logging * * \note \p verbosity is not the same as the logging level (LOG_ERR, etc.). */ void pcmk__cli_init_logging(const char *name, unsigned int verbosity); int pcmk__add_logfile(const char *filename); void pcmk__add_logfiles(gchar **log_files, pcmk__output_t *out); void pcmk__free_common_logger(void); #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_LOGGING_INTERNAL__H diff --git a/lib/common/agents.c b/lib/common/agents.c index 8bdbe8c2ca..81cd043c58 100644 --- a/lib/common/agents.c +++ b/lib/common/agents.c @@ -1,195 +1,194 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include /*! * \brief Get capabilities of a resource agent standard * * \param[in] standard Standard name * * \return Bitmask of enum pcmk_ra_caps values */ uint32_t pcmk_get_ra_caps(const char *standard) { /* @COMPAT This should probably be case-sensitive, but isn't, * for backward compatibility. 
*/ if (standard == NULL) { return pcmk_ra_cap_none; } else if (!strcasecmp(standard, PCMK_RESOURCE_CLASS_OCF)) { return pcmk_ra_cap_provider | pcmk_ra_cap_params | pcmk_ra_cap_unique | pcmk_ra_cap_promotable | pcmk_ra_cap_cli_exec; } else if (!strcasecmp(standard, PCMK_RESOURCE_CLASS_STONITH)) { /* @COMPAT Stonith resources can't really be unique clones, but we've * allowed it in the past and have it in some scheduler regression tests * (which were likely never used as real configurations). * * @TODO Remove pcmk_ra_cap_unique at the next major schema version * bump, with a transform to remove PCMK_META_GLOBALLY_UNIQUE from the * config. */ return pcmk_ra_cap_params | pcmk_ra_cap_unique | pcmk_ra_cap_stdin | pcmk_ra_cap_fence_params; } else if (!strcasecmp(standard, PCMK_RESOURCE_CLASS_LSB)) { return pcmk_ra_cap_status | pcmk_ra_cap_cli_exec; } else if (!strcasecmp(standard, PCMK_RESOURCE_CLASS_SYSTEMD) - || !strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) - || !strcasecmp(standard, PCMK_RESOURCE_CLASS_UPSTART)) { + || !strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE)) { return pcmk_ra_cap_status; } else if (!strcasecmp(standard, PCMK_RESOURCE_CLASS_NAGIOS)) { return pcmk_ra_cap_params; } return pcmk_ra_cap_none; } int pcmk__effective_rc(int rc) { int remapped_rc = rc; switch (rc) { case PCMK_OCF_DEGRADED: remapped_rc = PCMK_OCF_OK; break; case PCMK_OCF_DEGRADED_PROMOTED: remapped_rc = PCMK_OCF_RUNNING_PROMOTED; break; default: break; } return remapped_rc; } char * crm_generate_ra_key(const char *standard, const char *provider, const char *type) { bool std_empty = pcmk__str_empty(standard); bool prov_empty = pcmk__str_empty(provider); bool ty_empty = pcmk__str_empty(type); if (std_empty || ty_empty) { return NULL; } return crm_strdup_printf("%s%s%s:%s", standard, (prov_empty ? "" : ":"), (prov_empty ? "" : provider), type); } /*! * \brief Parse a "standard[:provider]:type" agent specification * * \param[in] spec Agent specification * \param[out] standard Newly allocated memory containing agent standard (or NULL) * \param[out] provider Newly allocated memory containing agent provider (or NULL) * \param[out] type Newly allocated memory containing agent type (or NULL) * * \return pcmk_ok if the string could be parsed, -EINVAL otherwise * * \note It is acceptable for the type to contain a ':' if the standard supports * that. For example, systemd supports the form "systemd:UNIT@A:B". * \note It is the caller's responsibility to free the returned values. */ int crm_parse_agent_spec(const char *spec, char **standard, char **provider, char **type) { char *colon; CRM_CHECK(spec && standard && provider && type, return -EINVAL); *standard = NULL; *provider = NULL; *type = NULL; colon = strchr(spec, ':'); if ((colon == NULL) || (colon == spec)) { return -EINVAL; } *standard = strndup(spec, colon - spec); spec = colon + 1; if (pcmk_is_set(pcmk_get_ra_caps(*standard), pcmk_ra_cap_provider)) { colon = strchr(spec, ':'); if ((colon == NULL) || (colon == spec)) { free(*standard); return -EINVAL; } *provider = strndup(spec, colon - spec); spec = colon + 1; } if (*spec == '\0') { free(*standard); free(*provider); return -EINVAL; } *type = strdup(spec); return pcmk_ok; } /*! * \brief Check whether a given stonith parameter is handled by Pacemaker * * Return true if a given string is the name of one of the special resource * instance attributes interpreted directly by Pacemaker for stonith-class * resources.
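 *
 * Illustrative checks (parameter names below chosen only as examples):
 *
 *     pcmk_stonith_param("pcmk_host_list");      // true: special parameter
 *     pcmk_stonith_param("pcmk_reboot_timeout"); // true: per-action override
 *     pcmk_stonith_param("ip");                  // false: agent-specific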
* * \param[in] param Parameter name to check * * \return true if \p param is a special fencing parameter */ bool pcmk_stonith_param(const char *param) { if (param == NULL) { return false; } if (pcmk__str_any_of(param, PCMK_STONITH_PROVIDES, PCMK_STONITH_STONITH_TIMEOUT, NULL)) { return true; } if (!pcmk__starts_with(param, "pcmk_")) { // Short-circuit common case return false; } if (pcmk__str_any_of(param, PCMK_STONITH_ACTION_LIMIT, PCMK_STONITH_DELAY_BASE, PCMK_STONITH_DELAY_MAX, PCMK_STONITH_HOST_ARGUMENT, PCMK_STONITH_HOST_CHECK, PCMK_STONITH_HOST_LIST, PCMK_STONITH_HOST_MAP, NULL)) { return true; } param = strchr(param + 5, '_'); // Skip past "pcmk_ACTION" return pcmk__str_any_of(param, "_action", "_timeout", "_retries", NULL); } diff --git a/lib/common/tests/agents/pcmk_get_ra_caps_test.c b/lib/common/tests/agents/pcmk_get_ra_caps_test.c index 3756fc1b30..f8e54e9f84 100644 --- a/lib/common/tests/agents/pcmk_get_ra_caps_test.c +++ b/lib/common/tests/agents/pcmk_get_ra_caps_test.c @@ -1,73 +1,71 @@ /* * Copyright 2022-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include static void ocf_standard(void **state) { uint32_t expected = pcmk_ra_cap_provider | pcmk_ra_cap_params | pcmk_ra_cap_unique | pcmk_ra_cap_promotable | pcmk_ra_cap_cli_exec; assert_int_equal(pcmk_get_ra_caps("ocf"), expected); assert_int_equal(pcmk_get_ra_caps("OCF"), expected); } static void stonith_standard(void **state) { uint32_t expected = pcmk_ra_cap_params | pcmk_ra_cap_unique | pcmk_ra_cap_stdin | pcmk_ra_cap_fence_params; assert_int_equal(pcmk_get_ra_caps("stonith"), expected); assert_int_equal(pcmk_get_ra_caps("StOnItH"), expected); } static void service_standard(void **state) { assert_int_equal(pcmk_get_ra_caps("systemd"), pcmk_ra_cap_status); assert_int_equal(pcmk_get_ra_caps("SYSTEMD"), pcmk_ra_cap_status); assert_int_equal(pcmk_get_ra_caps("service"), pcmk_ra_cap_status); assert_int_equal(pcmk_get_ra_caps("SeRvIcE"), pcmk_ra_cap_status); - assert_int_equal(pcmk_get_ra_caps("upstart"), pcmk_ra_cap_status); - assert_int_equal(pcmk_get_ra_caps("uPsTaRt"), pcmk_ra_cap_status); } static void lsb_standard(void **state) { uint32_t expected = pcmk_ra_cap_status | pcmk_ra_cap_cli_exec; assert_int_equal(pcmk_get_ra_caps("lsb"), expected); assert_int_equal(pcmk_get_ra_caps("LSB"), expected); } static void nagios_standard(void **state) { assert_int_equal(pcmk_get_ra_caps("nagios"), pcmk_ra_cap_params); assert_int_equal(pcmk_get_ra_caps("NAGios"), pcmk_ra_cap_params); } static void unknown_standard(void **state) { assert_int_equal(pcmk_get_ra_caps("blahblah"), pcmk_ra_cap_none); assert_int_equal(pcmk_get_ra_caps(""), pcmk_ra_cap_none); assert_int_equal(pcmk_get_ra_caps(NULL), pcmk_ra_cap_none); } PCMK__UNIT_TEST(NULL, NULL, cmocka_unit_test(ocf_standard), cmocka_unit_test(stonith_standard), cmocka_unit_test(service_standard), cmocka_unit_test(lsb_standard), cmocka_unit_test(nagios_standard), cmocka_unit_test(unknown_standard)) diff --git a/lib/pacemaker/pcmk_injections.c b/lib/pacemaker/pcmk_injections.c index 7115f94f72..d3e5fd13a2 100644 --- a/lib/pacemaker/pcmk_injections.c +++ b/lib/pacemaker/pcmk_injections.c @@ -1,778 +1,777 @@ /* * Copyright 2009-2024 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // lrmd_event_data_t, etc. #include #include #include #include "libpacemaker_private.h" bool pcmk__simulate_node_config = false; #define XPATH_NODE_CONFIG "//" PCMK_XE_NODE "[@" PCMK_XA_UNAME "='%s']" #define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" #define XPATH_NODE_STATE_BY_ID "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "='%s']" #define XPATH_RSC_HISTORY XPATH_NODE_STATE \ "//" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" /*! * \internal * \brief Inject a fictitious transient node attribute into scheduler input * * \param[in,out] out Output object for displaying error messages * \param[in,out] cib_node \c PCMK__XE_NODE_STATE XML to inject attribute into * \param[in] name Transient node attribute name to inject * \param[in] value Transient node attribute value to inject */ static void inject_transient_attr(pcmk__output_t *out, xmlNode *cib_node, const char *name, const char *value) { xmlNode *attrs = NULL; xmlNode *instance_attrs = NULL; const char *node_uuid = pcmk__xe_id(cib_node); out->message(out, "inject-attr", name, value, cib_node); attrs = pcmk__xe_first_child(cib_node, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL, NULL); if (attrs == NULL) { attrs = pcmk__xe_create(cib_node, PCMK__XE_TRANSIENT_ATTRIBUTES); crm_xml_add(attrs, PCMK_XA_ID, node_uuid); } instance_attrs = pcmk__xe_first_child(attrs, PCMK_XE_INSTANCE_ATTRIBUTES, NULL, NULL); if (instance_attrs == NULL) { instance_attrs = pcmk__xe_create(attrs, PCMK_XE_INSTANCE_ATTRIBUTES); crm_xml_add(instance_attrs, PCMK_XA_ID, node_uuid); } crm_create_nvpair_xml(instance_attrs, NULL, name, value); } /*! 
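 * (Aside on inject_transient_attr() above, added for illustration only: the
 * injected attribute lands in the node's status section roughly as
 * <transient_attributes><instance_attributes><nvpair name="..." value="..."/>,
 * with the node ID reused as the ID of both container elements.)
 *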
* \internal * \brief Inject a fictitious fail count into a scheduler input * * \param[in,out] out Output object for displaying error messages * \param[in,out] cib_conn CIB connection * \param[in,out] cib_node Node state XML to inject into * \param[in] resource ID of resource for fail count to inject * \param[in] task Action name for fail count to inject * \param[in] interval_ms Action interval (in milliseconds) for fail count * \param[in] exit_status Action result for fail count to inject (if * \c PCMK_OCF_OK, or \c PCMK_OCF_NOT_RUNNING when * \p interval_ms is 0, inject nothing) */ void pcmk__inject_failcount(pcmk__output_t *out, cib_t *cib_conn, xmlNode *cib_node, const char *resource, const char *task, guint interval_ms, int exit_status) { char *name = NULL; char *value = NULL; int failcount = 0; xmlNode *output = NULL; CRM_CHECK((out != NULL) && (cib_conn != NULL) && (cib_node != NULL) && (resource != NULL) && (task != NULL), return); if ((exit_status == PCMK_OCF_OK) || ((exit_status == PCMK_OCF_NOT_RUNNING) && (interval_ms == 0))) { return; } // Get current failcount and increment it name = pcmk__failcount_name(resource, task, interval_ms); if (cib__get_node_attrs(out, cib_conn, PCMK_XE_STATUS, pcmk__xe_id(cib_node), NULL, NULL, NULL, name, NULL, &output) == pcmk_rc_ok) { if (crm_element_value_int(output, PCMK_XA_VALUE, &failcount) != 0) { failcount = 0; } } value = pcmk__itoa(failcount + 1); inject_transient_attr(out, cib_node, name, value); free(name); free(value); pcmk__xml_free(output); name = pcmk__lastfailure_name(resource, task, interval_ms); value = pcmk__ttoa(time(NULL)); inject_transient_attr(out, cib_node, name, value); free(name); free(value); } /*! * \internal * \brief Create a CIB configuration entry for a fictitious node * * \param[in,out] cib_conn CIB object to use * \param[in] node Node name to use */ static void create_node_entry(cib_t *cib_conn, const char *node) { int rc = pcmk_ok; char *xpath = crm_strdup_printf(XPATH_NODE_CONFIG, node); rc = cib_conn->cmds->query(cib_conn, xpath, NULL, cib_xpath|cib_sync_call); if (rc == -ENXIO) { // Only add if not already existing xmlNode *cib_object = pcmk__xe_create(NULL, PCMK_XE_NODE); crm_xml_add(cib_object, PCMK_XA_ID, node); // Use node name as ID crm_xml_add(cib_object, PCMK_XA_UNAME, node); cib_conn->cmds->create(cib_conn, PCMK_XE_NODES, cib_object, cib_sync_call); /* Not bothering with subsequent query to see if it exists, we'll bomb out later in the call to query_node_uuid()... */ pcmk__xml_free(cib_object); } free(xpath); } /*! * \internal * \brief Synthesize a fake executor event for an action * * \param[in] cib_resource XML for any existing resource action history * \param[in] task Name of action to synthesize * \param[in] interval_ms Interval of action to synthesize * \param[in] outcome Result of action to synthesize * * \return Newly allocated executor event * \note It is the caller's responsibility to free the result with * lrmd_free_event(). 
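 *
 * Minimal illustrative pairing (argument values hypothetical):
 *
 *     lrmd_event_data_t *op = create_op(cib_resource, "start", 0,
 *                                       PCMK_OCF_OK);
 *
 *     // ... convert op to history XML via pcmk__inject_action_result() ...
 *     lrmd_free_event(op);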
*/ static lrmd_event_data_t * create_op(const xmlNode *cib_resource, const char *task, guint interval_ms, int outcome) { lrmd_event_data_t *op = NULL; xmlNode *xop = NULL; op = lrmd_new_event(pcmk__xe_id(cib_resource), task, interval_ms); lrmd__set_result(op, outcome, PCMK_EXEC_DONE, "Simulated action result"); op->params = NULL; // Not needed for simulation purposes op->t_run = time(NULL); op->t_rcchange = op->t_run; // Use a call ID higher than any existing history entries op->call_id = 0; for (xop = pcmk__xe_first_child(cib_resource, NULL, NULL, NULL); xop != NULL; xop = pcmk__xe_next(xop)) { int tmp = 0; crm_element_value_int(xop, PCMK__XA_CALL_ID, &tmp); if (tmp > op->call_id) { op->call_id = tmp; } } op->call_id++; return op; } /*! * \internal * \brief Inject a fictitious resource history entry into a scheduler input * * \param[in,out] cib_resource Resource history XML to inject entry into * \param[in,out] op Action result to inject * \param[in] node Name of node where the action occurred * \param[in] target_rc Expected result for action to inject * * \return XML of injected resource history entry */ xmlNode * pcmk__inject_action_result(xmlNode *cib_resource, lrmd_event_data_t *op, const char *node, int target_rc) { return pcmk__create_history_xml(cib_resource, op, CRM_FEATURE_SET, target_rc, node, crm_system_name); } /*! * \internal * \brief Inject a fictitious node into a scheduler input * * \param[in,out] cib_conn Scheduler input CIB to inject node into * \param[in] node Name of node to inject * \param[in] uuid UUID of node to inject * * \return XML of \c PCMK__XE_NODE_STATE entry for new node * \note If the global pcmk__simulate_node_config has been set to true, a * node entry in the configuration section will be added, as well as a * node state entry in the status section. 
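 *
 * Illustrative result (attribute values are examples only): the returned
 * element looks roughly like
 *
 *     <node_state id="101" uname="node1"/>
 *
 * where the ID comes from \p uuid, or from a CIB lookup when \p uuid is NULL.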
*/ xmlNode * pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid) { int rc = pcmk_ok; xmlNode *cib_object = NULL; char *xpath = crm_strdup_printf(XPATH_NODE_STATE, node); bool duplicate = false; char *found_uuid = NULL; if (pcmk__simulate_node_config) { create_node_entry(cib_conn, node); } rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object, cib_xpath|cib_sync_call); if ((cib_object != NULL) && (pcmk__xe_id(cib_object) == NULL)) { crm_err("Detected multiple " PCMK__XE_NODE_STATE " entries for " "xpath=%s, bailing", xpath); duplicate = true; goto done; } if (rc == -ENXIO) { if (uuid == NULL) { query_node_uuid(cib_conn, node, &found_uuid, NULL); } else { found_uuid = strdup(uuid); } if (found_uuid) { char *xpath_by_uuid = crm_strdup_printf(XPATH_NODE_STATE_BY_ID, found_uuid); /* It's possible that a PCMK__XE_NODE_STATE entry doesn't have a * PCMK_XA_UNAME yet */ rc = cib_conn->cmds->query(cib_conn, xpath_by_uuid, &cib_object, cib_xpath|cib_sync_call); if ((cib_object != NULL) && (pcmk__xe_id(cib_object) == NULL)) { crm_err("Can't inject node state for %s because multiple " "state entries found for ID %s", node, found_uuid); duplicate = true; free(xpath_by_uuid); goto done; } else if (cib_object != NULL) { crm_xml_add(cib_object, PCMK_XA_UNAME, node); rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_STATUS, cib_object, cib_sync_call); } free(xpath_by_uuid); } } if (rc == -ENXIO) { cib_object = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE); crm_xml_add(cib_object, PCMK_XA_ID, found_uuid); crm_xml_add(cib_object, PCMK_XA_UNAME, node); cib_conn->cmds->create(cib_conn, PCMK_XE_STATUS, cib_object, cib_sync_call); pcmk__xml_free(cib_object); rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object, cib_xpath|cib_sync_call); crm_trace("Injecting node state for %s (rc=%d)", node, rc); } done: free(found_uuid); free(xpath); if (duplicate) { crm_log_xml_warn(cib_object, "Duplicates"); crm_exit(CRM_EX_SOFTWARE); return NULL; // not reached, but makes static analysis happy } CRM_ASSERT(rc == pcmk_ok); return cib_object; } /*! * \internal * \brief Inject a fictitious node state change into a scheduler input * * \param[in,out] cib_conn Scheduler input CIB to inject into * \param[in] node Name of node to inject change for * \param[in] up If true, change state to online, otherwise offline * * \return XML of changed (or added) node state entry */ xmlNode * pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up) { xmlNode *cib_node = pcmk__inject_node(cib_conn, node, NULL); if (up) { pcmk__xe_set_props(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_TRUE, PCMK_XA_CRMD, PCMK_VALUE_ONLINE, PCMK__XA_JOIN, CRMD_JOINSTATE_MEMBER, PCMK_XA_EXPECTED, CRMD_JOINSTATE_MEMBER, NULL); } else { pcmk__xe_set_props(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_FALSE, PCMK_XA_CRMD, PCMK_VALUE_OFFLINE, PCMK__XA_JOIN, CRMD_JOINSTATE_DOWN, PCMK_XA_EXPECTED, CRMD_JOINSTATE_DOWN, NULL); } crm_xml_add(cib_node, PCMK_XA_CRM_DEBUG_ORIGIN, crm_system_name); return cib_node; } /*! 
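 * (Illustration for pcmk__inject_node_state_change() above: bringing a node
 * "up" sets roughly in_ccm="true", crmd="online", join="member" and
 * expected="member" on its node_state entry, while "down" sets the
 * corresponding offline/down values.)
 *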
* \internal * \brief Check whether a node has history for a given resource * * \param[in,out] cib_node Node state XML to check * \param[in] resource Resource name to check for * * \return Resource's \c PCMK__XE_LRM_RESOURCE XML entry beneath \p cib_node if * found, otherwise \c NULL */ static xmlNode * find_resource_xml(xmlNode *cib_node, const char *resource) { const char *node = crm_element_value(cib_node, PCMK_XA_UNAME); char *xpath = crm_strdup_printf(XPATH_RSC_HISTORY, node, resource); xmlNode *match = get_xpath_object(xpath, cib_node, LOG_TRACE); free(xpath); return match; } /*! * \internal * \brief Inject a resource history element into a scheduler input * * \param[in,out] out Output object for displaying error messages * \param[in,out] cib_node Node state XML to inject resource history entry into * \param[in] resource ID (in configuration) of resource to inject * \param[in] lrm_name ID as used in history (could be clone instance) * \param[in] rclass Resource agent class of resource to inject * \param[in] rtype Resource agent type of resource to inject * \param[in] rprovider Resource agent provider of resource to inject * * \return XML of injected resource history element * \note If a history element already exists under either \p resource or * \p lrm_name, this will return it rather than injecting a new one. */ xmlNode * pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *lrm_name, const char *rclass, const char *rtype, const char *rprovider) { xmlNode *lrm = NULL; xmlNode *container = NULL; xmlNode *cib_resource = NULL; cib_resource = find_resource_xml(cib_node, resource); if (cib_resource != NULL) { /* If an existing LRM history entry uses the resource name, * continue using it, even if lrm_name is different. 
*/ return cib_resource; } // Check for history entry under preferred name if (strcmp(resource, lrm_name) != 0) { cib_resource = find_resource_xml(cib_node, lrm_name); if (cib_resource != NULL) { return cib_resource; } } if ((rclass == NULL) || (rtype == NULL)) { // @TODO query configuration for class, provider, type out->err(out, "Resource %s not found in the status section of %s " "(supply class and type to continue)", resource, pcmk__xe_id(cib_node)); return NULL; } else if (!pcmk__strcase_any_of(rclass, PCMK_RESOURCE_CLASS_OCF, PCMK_RESOURCE_CLASS_STONITH, PCMK_RESOURCE_CLASS_SERVICE, - PCMK_RESOURCE_CLASS_UPSTART, PCMK_RESOURCE_CLASS_SYSTEMD, PCMK_RESOURCE_CLASS_LSB, NULL)) { out->err(out, "Invalid class for %s: %s", resource, rclass); return NULL; } else if (pcmk_is_set(pcmk_get_ra_caps(rclass), pcmk_ra_cap_provider) && (rprovider == NULL)) { // @TODO query configuration for provider out->err(out, "Please specify the provider for resource %s", resource); return NULL; } crm_info("Injecting new resource %s into node state '%s'", lrm_name, pcmk__xe_id(cib_node)); lrm = pcmk__xe_first_child(cib_node, PCMK__XE_LRM, NULL, NULL); if (lrm == NULL) { const char *node_uuid = pcmk__xe_id(cib_node); lrm = pcmk__xe_create(cib_node, PCMK__XE_LRM); crm_xml_add(lrm, PCMK_XA_ID, node_uuid); } container = pcmk__xe_first_child(lrm, PCMK__XE_LRM_RESOURCES, NULL, NULL); if (container == NULL) { container = pcmk__xe_create(lrm, PCMK__XE_LRM_RESOURCES); } cib_resource = pcmk__xe_create(container, PCMK__XE_LRM_RESOURCE); // If we're creating a new entry, use the preferred name crm_xml_add(cib_resource, PCMK_XA_ID, lrm_name); crm_xml_add(cib_resource, PCMK_XA_CLASS, rclass); crm_xml_add(cib_resource, PCMK_XA_PROVIDER, rprovider); crm_xml_add(cib_resource, PCMK_XA_TYPE, rtype); return cib_resource; } /*! * \internal * \brief Inject a ticket attribute into ticket state * * \param[in,out] out Output object for displaying error messages * \param[in] ticket_id Ticket whose state should be changed * \param[in] attr_name Ticket attribute name to inject * \param[in] attr_value Boolean value of ticket attribute to inject * \param[in,out] cib CIB object to use * * \return Standard Pacemaker return code */ static int set_ticket_state_attr(pcmk__output_t *out, const char *ticket_id, const char *attr_name, bool attr_value, cib_t *cib) { int rc = pcmk_rc_ok; xmlNode *xml_top = NULL; xmlNode *ticket_state_xml = NULL; // Check for an existing ticket state entry rc = pcmk__get_ticket_state(cib, ticket_id, &ticket_state_xml); if (rc == pcmk_rc_duplicate_id) { out->err(out, "Multiple " PCMK__XE_TICKET_STATE "s match ticket_id=%s", ticket_id); rc = pcmk_rc_ok; } if (rc == pcmk_rc_ok) { // Ticket state found, use it crm_debug("Injecting attribute into existing ticket state %s", ticket_id); xml_top = ticket_state_xml; } else if (rc == ENXIO) { // No ticket state, create it xmlNode *xml_obj = NULL; xml_top = pcmk__xe_create(NULL, PCMK_XE_STATUS); xml_obj = pcmk__xe_create(xml_top, PCMK_XE_TICKETS); ticket_state_xml = pcmk__xe_create(xml_obj, PCMK__XE_TICKET_STATE); crm_xml_add(ticket_state_xml, PCMK_XA_ID, ticket_id); } else { // Error return rc; } // Add the attribute to the ticket state pcmk__xe_set_bool_attr(ticket_state_xml, attr_name, attr_value); crm_log_xml_debug(xml_top, "Update"); // Commit the change to the CIB rc = cib->cmds->modify(cib, PCMK_XE_STATUS, xml_top, cib_sync_call); rc = pcmk_legacy2rc(rc); pcmk__xml_free(xml_top); return rc; } /*! 
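 * For example (resource and node names hypothetical), an injected spec of
 *
 *     myrsc_monitor_10000@node1=7
 *
 * describes a 10-second recurring monitor of "myrsc" on "node1" that
 * returned OCF exit status 7 (not running).
 *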
* \internal * \brief Inject a fictitious action into the cluster * * \param[in,out] out Output object for displaying error messages * \param[in] spec Action specification to inject * \param[in,out] cib CIB object for scheduler input * \param[in] scheduler Scheduler data */ static void inject_action(pcmk__output_t *out, const char *spec, cib_t *cib, const pcmk_scheduler_t *scheduler) { int rc; int outcome = PCMK_OCF_OK; guint interval_ms = 0; char *key = NULL; char *node = NULL; char *task = NULL; char *resource = NULL; const char *rtype = NULL; const char *rclass = NULL; const char *rprovider = NULL; xmlNode *cib_op = NULL; xmlNode *cib_node = NULL; xmlNode *cib_resource = NULL; const pcmk_resource_t *rsc = NULL; lrmd_event_data_t *op = NULL; out->message(out, "inject-spec", spec); key = pcmk__assert_alloc(1, strlen(spec) + 1); node = pcmk__assert_alloc(1, strlen(spec) + 1); rc = sscanf(spec, "%[^@]@%[^=]=%d", key, node, &outcome); if (rc != 3) { out->err(out, "Invalid operation spec: %s. Only found %d fields", spec, rc); goto done; } parse_op_key(key, &resource, &task, &interval_ms); rsc = pe_find_resource(scheduler->priv->resources, resource); if (rsc == NULL) { out->err(out, "Invalid resource name: %s", resource); goto done; } rclass = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); rtype = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); rprovider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); cib_node = pcmk__inject_node(cib, node, NULL); CRM_ASSERT(cib_node != NULL); pcmk__inject_failcount(out, cib, cib_node, resource, task, interval_ms, outcome); cib_resource = pcmk__inject_resource_history(out, cib_node, resource, resource, rclass, rtype, rprovider); CRM_ASSERT(cib_resource != NULL); op = create_op(cib_resource, task, interval_ms, outcome); CRM_ASSERT(op != NULL); cib_op = pcmk__inject_action_result(cib_resource, op, node, 0); CRM_ASSERT(cib_op != NULL); lrmd_free_event(op); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); CRM_ASSERT(rc == pcmk_ok); done: free(task); free(node); free(key); } /*! 
* \internal * \brief Inject fictitious scheduler inputs * * \param[in,out] scheduler Scheduler data * \param[in,out] cib CIB object for scheduler input to modify * \param[in] injections Injections to apply */ void pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib, const pcmk_injections_t *injections) { int rc = pcmk_ok; const GList *iter = NULL; xmlNode *cib_node = NULL; pcmk__output_t *out = scheduler->priv->out; out->message(out, "inject-modify-config", injections->quorum, injections->watchdog); if (injections->quorum != NULL) { xmlNode *top = pcmk__xe_create(NULL, PCMK_XE_CIB); /* crm_xml_add(top, PCMK_XA_DC_UUID, dc_uuid); */ crm_xml_add(top, PCMK_XA_HAVE_QUORUM, injections->quorum); rc = cib->cmds->modify(cib, NULL, top, cib_sync_call); CRM_ASSERT(rc == pcmk_ok); } if (injections->watchdog != NULL) { rc = cib__update_node_attr(out, cib, cib_sync_call, PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL, PCMK_OPT_HAVE_WATCHDOG, injections->watchdog, NULL, NULL); CRM_ASSERT(rc == pcmk_rc_ok); } for (iter = injections->node_up; iter != NULL; iter = iter->next) { const char *node = (const char *) iter->data; out->message(out, "inject-modify-node", "Online", node); cib_node = pcmk__inject_node_state_change(cib, node, true); CRM_ASSERT(cib_node != NULL); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); CRM_ASSERT(rc == pcmk_ok); pcmk__xml_free(cib_node); } for (iter = injections->node_down; iter != NULL; iter = iter->next) { const char *node = (const char *) iter->data; char *xpath = NULL; out->message(out, "inject-modify-node", "Offline", node); cib_node = pcmk__inject_node_state_change(cib, node, false); CRM_ASSERT(cib_node != NULL); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); CRM_ASSERT(rc == pcmk_ok); pcmk__xml_free(cib_node); xpath = crm_strdup_printf("//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" "/" PCMK__XE_LRM, node); cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_sync_call); free(xpath); xpath = crm_strdup_printf("//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" "/" PCMK__XE_TRANSIENT_ATTRIBUTES, node); cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_sync_call); free(xpath); } for (iter = injections->node_fail; iter != NULL; iter = iter->next) { const char *node = (const char *) iter->data; out->message(out, "inject-modify-node", "Failing", node); cib_node = pcmk__inject_node_state_change(cib, node, true); crm_xml_add(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_FALSE); CRM_ASSERT(cib_node != NULL); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); CRM_ASSERT(rc == pcmk_ok); pcmk__xml_free(cib_node); } for (iter = injections->ticket_grant; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Granting", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK__XA_GRANTED, true, cib); CRM_ASSERT(rc == pcmk_rc_ok); } for (iter = injections->ticket_revoke; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Revoking", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK__XA_GRANTED, false, cib); CRM_ASSERT(rc == pcmk_rc_ok); } for (iter = injections->ticket_standby; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Standby", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK_XA_STANDBY, true, cib); CRM_ASSERT(rc == pcmk_rc_ok); } for (iter = 
injections->ticket_activate; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Activating", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK_XA_STANDBY, false, cib); CRM_ASSERT(rc == pcmk_rc_ok); } for (iter = injections->op_inject; iter != NULL; iter = iter->next) { inject_action(out, (const char *) iter->data, cib, scheduler); } if (!out->is_quiet(out)) { out->end_list(out); } } void pcmk_free_injections(pcmk_injections_t *injections) { if (injections == NULL) { return; } g_list_free_full(injections->node_up, g_free); g_list_free_full(injections->node_down, g_free); g_list_free_full(injections->node_fail, g_free); g_list_free_full(injections->op_fail, g_free); g_list_free_full(injections->op_inject, g_free); g_list_free_full(injections->ticket_grant, g_free); g_list_free_full(injections->ticket_revoke, g_free); g_list_free_full(injections->ticket_standby, g_free); g_list_free_full(injections->ticket_activate, g_free); free(injections->quorum); free(injections->watchdog); free(injections); } diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c index 5b813b4821..24471d018e 100644 --- a/lib/pengine/complex.c +++ b/lib/pengine/complex.c @@ -1,1310 +1,1304 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include "pe_status_private.h" void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length); static pcmk_node_t *active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean); static pcmk__rsc_methods_t resource_class_functions[] = { { native_unpack, native_find_rsc, native_parameter, native_active, native_resource_state, native_location, native_free, pe__count_common, pe__native_is_filtered, active_node, pe__primitive_max_per_node, }, { group_unpack, native_find_rsc, native_parameter, group_active, group_resource_state, native_location, group_free, pe__count_common, pe__group_is_filtered, active_node, pe__group_max_per_node, }, { clone_unpack, native_find_rsc, native_parameter, clone_active, clone_resource_state, native_location, clone_free, pe__count_common, pe__clone_is_filtered, active_node, pe__clone_max_per_node, }, { pe__unpack_bundle, native_find_rsc, native_parameter, pe__bundle_active, pe__bundle_resource_state, native_location, pe__free_bundle, pe__count_bundle, pe__bundle_is_filtered, pe__bundle_active_node, pe__bundle_max_per_node, } }; static enum pcmk__rsc_variant get_resource_type(const char *name) { if (pcmk__str_eq(name, PCMK_XE_PRIMITIVE, pcmk__str_casei)) { return pcmk__rsc_variant_primitive; } else if (pcmk__str_eq(name, PCMK_XE_GROUP, pcmk__str_casei)) { return pcmk__rsc_variant_group; } else if (pcmk__str_eq(name, PCMK_XE_CLONE, pcmk__str_casei)) { return pcmk__rsc_variant_clone; } else if (pcmk__str_eq(name, PCMK_XE_BUNDLE, pcmk__str_casei)) { return pcmk__rsc_variant_bundle; } return pcmk__rsc_variant_unknown; } /*! 
* \internal * \brief Insert a meta-attribute if not already present * * \param[in] key Meta-attribute name * \param[in] value Meta-attribute value to add if not already present * \param[in,out] table Meta-attribute hash table to insert into * * \note This is like pcmk__insert_meta() except it won't overwrite existing * values. */ static void dup_attr(gpointer key, gpointer value, gpointer user_data) { GHashTable *table = user_data; CRM_CHECK((key != NULL) && (table != NULL), return); if (pcmk__str_eq((const char *) value, "#default", pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting meta-attributes (such as %s) to " "the explicit value '#default' is deprecated and " "will be removed in a future release", (const char *) key); } else if ((value != NULL) && (g_hash_table_lookup(table, key) == NULL)) { pcmk__insert_dup(table, (const char *) key, (const char *) value); } } static void expand_parents_fixed_nvpairs(pcmk_resource_t *rsc, pe_rule_eval_data_t *rule_data, GHashTable *meta_hash, pcmk_scheduler_t *scheduler) { GHashTable *parent_orig_meta = pcmk__strkey_table(free, free); pcmk_resource_t *p = rsc->priv->parent; if (p == NULL) { return ; } /* Search all parent resources, get the fixed value of * PCMK_XE_META_ATTRIBUTES set only in the original xml, and stack it in the * hash table. The fixed value of the lower parent resource takes precedence * and is not overwritten. */ while(p != NULL) { /* A hash table for comparison is generated, including the id-ref. */ pe__unpack_dataset_nvpairs(p->priv->xml, PCMK_XE_META_ATTRIBUTES, rule_data, parent_orig_meta, NULL, scheduler); p = p->priv->parent; } if (parent_orig_meta != NULL) { // This will not overwrite any values already existing for child g_hash_table_foreach(parent_orig_meta, dup_attr, meta_hash); } if (parent_orig_meta != NULL) { g_hash_table_destroy(parent_orig_meta); } return ; } void get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pe_rsc_eval_data_t rsc_rule_data = { .standard = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS), .provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER), .agent = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE) }; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .now = scheduler->priv->now, .match_data = NULL, .rsc_data = &rsc_rule_data, .op_data = NULL }; if (node) { /* @COMPAT Support for node attribute expressions in rules for * meta-attributes is deprecated. When we can break behavioral backward * compatibility, drop this block. */ rule_data.node_hash = node->priv->attrs; } for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->priv->xml); a != NULL; a = a->next) { if (a->children != NULL) { dup_attr((gpointer) a->name, (gpointer) a->children->content, meta_hash); } } pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_META_ATTRIBUTES, &rule_data, meta_hash, NULL, scheduler); /* Set the PCMK_XE_META_ATTRIBUTES explicitly set in the parent resource to * the hash table of the child resource. If it is already explicitly set as * a child, it will not be overwritten. */ if (rsc->priv->parent != NULL) { expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, scheduler); } /* check the defaults */ pe__unpack_dataset_nvpairs(scheduler->priv->rsc_defaults, PCMK_XE_META_ATTRIBUTES, &rule_data, meta_hash, NULL, scheduler); /* If there is PCMK_XE_META_ATTRIBUTES that the parent resource has not * explicitly set, set a value that is not set from PCMK_XE_RSC_DEFAULTS * either. 
The values already set up to this point will not be overwritten. */ if (rsc->priv->parent != NULL) { g_hash_table_foreach(rsc->priv->parent->priv->meta, dup_attr, meta_hash); } } /*! * \brief Get final values of a resource's instance attributes * * \param[in,out] instance_attrs Where to store the instance attributes * \param[in] rsc Resource to get instance attributes for * \param[in] node If not NULL, evaluate rules for this node * \param[in,out] scheduler Scheduler data */ void get_rsc_attributes(GHashTable *instance_attrs, const pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pe_rule_eval_data_t rule_data = { .node_hash = NULL, .now = NULL, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; CRM_CHECK((instance_attrs != NULL) && (rsc != NULL) && (scheduler != NULL), return); rule_data.now = scheduler->priv->now; if (node != NULL) { rule_data.node_hash = node->priv->attrs; } // Evaluate resource's own values, then its ancestors' values pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data, instance_attrs, NULL, scheduler); if (rsc->priv->parent != NULL) { get_rsc_attributes(instance_attrs, rsc->priv->parent, node, scheduler); } } static char * template_op_key(xmlNode * op) { const char *name = crm_element_value(op, PCMK_XA_NAME); const char *role = crm_element_value(op, PCMK_XA_ROLE); char *key = NULL; if ((role == NULL) || pcmk__strcase_any_of(role, PCMK_ROLE_STARTED, PCMK_ROLE_UNPROMOTED, PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) { role = PCMK__ROLE_UNKNOWN; } key = crm_strdup_printf("%s-%s", name, role); return key; } static gboolean unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml, pcmk_scheduler_t *scheduler) { xmlNode *cib_resources = NULL; xmlNode *template = NULL; xmlNode *new_xml = NULL; xmlNode *child_xml = NULL; xmlNode *rsc_ops = NULL; xmlNode *template_ops = NULL; const char *template_ref = NULL; const char *id = NULL; if (xml_obj == NULL) { pcmk__config_err("No resource object for template unpacking"); return FALSE; } template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE); if (template_ref == NULL) { return TRUE; } id = pcmk__xe_id(xml_obj); if (id == NULL) { pcmk__config_err("'%s' object must have a id", xml_obj->name); return FALSE; } if (pcmk__str_eq(template_ref, id, pcmk__str_none)) { pcmk__config_err("The resource object '%s' should not reference itself", id); return FALSE; } cib_resources = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input, LOG_TRACE); if (cib_resources == NULL) { pcmk__config_err("No resources configured"); return FALSE; } template = pcmk__xe_first_child(cib_resources, PCMK_XE_TEMPLATE, PCMK_XA_ID, template_ref); if (template == NULL) { pcmk__config_err("No template named '%s'", template_ref); return FALSE; } new_xml = pcmk__xml_copy(NULL, template); xmlNodeSetName(new_xml, xml_obj->name); crm_xml_add(new_xml, PCMK_XA_ID, id); crm_xml_add(new_xml, PCMK__META_CLONE, crm_element_value(xml_obj, PCMK__META_CLONE)); template_ops = pcmk__xe_first_child(new_xml, PCMK_XE_OPERATIONS, NULL, NULL); for (child_xml = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL); child_xml != NULL; child_xml = pcmk__xe_next(child_xml)) { xmlNode *new_child = pcmk__xml_copy(new_xml, child_xml); if (pcmk__xe_is(new_child, PCMK_XE_OPERATIONS)) { rsc_ops = new_child; } } if (template_ops && rsc_ops) { xmlNode *op = NULL; GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL); for (op = pcmk__xe_first_child(rsc_ops, NULL, NULL, NULL); op != NULL; op = pcmk__xe_next(op)) { char *key = 
template_op_key(op); g_hash_table_insert(rsc_ops_hash, key, op); } for (op = pcmk__xe_first_child(template_ops, NULL, NULL, NULL); op != NULL; op = pcmk__xe_next(op)) { char *key = template_op_key(op); if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) { pcmk__xml_copy(rsc_ops, op); } free(key); } if (rsc_ops_hash) { g_hash_table_destroy(rsc_ops_hash); } pcmk__xml_free(template_ops); } /*pcmk__xml_free(*expanded_xml); */ *expanded_xml = new_xml; #if 0 /* Disable multi-level templates for now */ if (!unpack_template(new_xml, expanded_xml, scheduler)) { pcmk__xml_free(*expanded_xml); *expanded_xml = NULL; return FALSE; } #endif return TRUE; } static gboolean add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler) { const char *template_ref = NULL; const char *id = NULL; if (xml_obj == NULL) { pcmk__config_err("No resource object for processing resource list " "of template"); return FALSE; } template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE); if (template_ref == NULL) { return TRUE; } id = pcmk__xe_id(xml_obj); if (id == NULL) { pcmk__config_err("'%s' object must have a id", xml_obj->name); return FALSE; } if (pcmk__str_eq(template_ref, id, pcmk__str_none)) { pcmk__config_err("The resource object '%s' should not reference itself", id); return FALSE; } pcmk__add_idref(scheduler->priv->templates, template_ref, id); return TRUE; } /*! * \internal * \brief Check whether a clone or instance being unpacked is globally unique * * \param[in] rsc Clone or clone instance to check * * \return \c true if \p rsc is globally unique according to its * meta-attributes, otherwise \c false */ static bool detect_unique(const pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_GLOBALLY_UNIQUE); if (value == NULL) { // Default to true if clone-node-max > 1 value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_CLONE_NODE_MAX); if (value != NULL) { int node_max = 1; if ((pcmk__scan_min_int(value, &node_max, 0) == pcmk_rc_ok) && (node_max > 1)) { return true; } } return false; } return crm_is_true(value); } static void free_params_table(gpointer data) { g_hash_table_destroy((GHashTable *) data); } /*! * \brief Get a table of resource parameters * * \param[in,out] rsc Resource to query * \param[in] node Node for evaluating rules (NULL for defaults) * \param[in,out] scheduler Scheduler data * * \return Hash table containing resource parameter names and values * (or NULL if \p rsc or \p scheduler is NULL) * \note The returned table will be destroyed when the resource is freed, so * callers should not destroy it. */ GHashTable * pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { GHashTable *params_on_node = NULL; /* A NULL node is used to request the resource's default parameters * (not evaluated for node), but we always want something non-NULL * as a hash table key. 
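 *
 * Rough caller-side sketch (illustrative only):
 *
 *     GHashTable *defaults = pe_rsc_params(rsc, NULL, scheduler);
 *     GHashTable *on_node = pe_rsc_params(rsc, node, scheduler);
 *
 * Both tables are cached on (and owned by) the resource, so callers must not
 * destroy them.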
*/ const char *node_name = ""; // Sanity check if ((rsc == NULL) || (scheduler == NULL)) { return NULL; } if ((node != NULL) && (node->priv->name != NULL)) { node_name = node->priv->name; } // Find the parameter table for given node if (rsc->priv->parameter_cache == NULL) { rsc->priv->parameter_cache = pcmk__strikey_table(free, free_params_table); } else { params_on_node = g_hash_table_lookup(rsc->priv->parameter_cache, node_name); } // If none exists yet, create one with parameters evaluated for node if (params_on_node == NULL) { params_on_node = pcmk__strkey_table(free, free); get_rsc_attributes(params_on_node, rsc, node, scheduler); g_hash_table_insert(rsc->priv->parameter_cache, strdup(node_name), params_on_node); } return params_on_node; } /*! * \internal * \brief Unpack a resource's \c PCMK_META_REQUIRES meta-attribute * * \param[in,out] rsc Resource being unpacked * \param[in] value Value of \c PCMK_META_REQUIRES meta-attribute * \param[in] is_default Whether \p value was selected by default */ static void unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default) { const pcmk_scheduler_t *scheduler = rsc->priv->scheduler; if (pcmk__str_eq(value, PCMK_VALUE_NOTHING, pcmk__str_casei)) { } else if (pcmk__str_eq(value, PCMK_VALUE_QUORUM, pcmk__str_casei)) { pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_quorum); } else if (pcmk__str_eq(value, PCMK_VALUE_FENCING, pcmk__str_casei)) { pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_fencing); if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { pcmk__config_warn("%s requires fencing but fencing is disabled", rsc->id); } } else if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) { if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) { pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s " "to \"" PCMK_VALUE_QUORUM "\" because fencing " "devices cannot require unfencing", rsc->id); unpack_requires(rsc, PCMK_VALUE_QUORUM, true); return; } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s " "to \"" PCMK_VALUE_QUORUM "\" because fencing is " "disabled", rsc->id); unpack_requires(rsc, PCMK_VALUE_QUORUM, true); return; } else { pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_fencing |pcmk__rsc_needs_unfencing); } } else { const char *orig_value = value; if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) { value = PCMK_VALUE_QUORUM; } else if (pcmk__is_primitive(rsc) && xml_contains_remote_node(rsc->priv->xml)) { value = PCMK_VALUE_QUORUM; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_enable_unfencing)) { value = PCMK_VALUE_UNFENCING; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { value = PCMK_VALUE_FENCING; } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) { value = PCMK_VALUE_NOTHING; } else { value = PCMK_VALUE_QUORUM; } if (orig_value != NULL) { pcmk__config_err("Resetting '" PCMK_META_REQUIRES "' for %s " "to '%s' because '%s' is not valid", rsc->id, value, orig_value); } unpack_requires(rsc, value, true); return; } pcmk__rsc_trace(rsc, "\tRequired to start: %s%s", value, (is_default? 
" (default)" : "")); } static void warn_about_deprecated_classes(pcmk_resource_t *rsc) { const char *std = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); - if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) { - pcmk__warn_once(pcmk__wo_upstart, - "Support for Upstart resources (such as %s) is " - "deprecated and will be removed in a future release", - rsc->id); - - } else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) { + if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) { pcmk__warn_once(pcmk__wo_nagios, "Support for Nagios resources (such as %s) is " "deprecated and will be removed in a future release", rsc->id); } } /*! * \internal * \brief Parse resource priority from meta-attribute * * \param[in,out] rsc Resource being unpacked */ static void unpack_priority(pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_PRIORITY); int rc = pcmk_parse_score(value, &(rsc->priv->priority), 0); if (rc != pcmk_rc_ok) { pcmk__config_warn("Using default (0) for resource %s " PCMK_META_PRIORITY " because '%s' is not a valid value: %s", rsc->id, value, pcmk_rc_str(rc)); } } /*! * \internal * \brief Parse resource stickiness from meta-attribute * * \param[in,out] rsc Resource being unpacked */ static void unpack_stickiness(pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_RESOURCE_STICKINESS); if (pcmk__str_eq(value, PCMK_VALUE_DEFAULT, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_RESOURCE_STICKINESS " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); } else { int rc = pcmk_parse_score(value, &(rsc->priv->stickiness), 0); if (rc != pcmk_rc_ok) { pcmk__config_warn("Using default (0) for resource %s " PCMK_META_RESOURCE_STICKINESS " because '%s' is not a valid value: %s", rsc->id, value, pcmk_rc_str(rc)); } } } /*! * \internal * \brief Parse resource migration threshold from meta-attribute * * \param[in,out] rsc Resource being unpacked */ static void unpack_migration_threshold(pcmk_resource_t *rsc) { const char *value = g_hash_table_lookup(rsc->priv->meta, PCMK_META_MIGRATION_THRESHOLD); if (pcmk__str_eq(value, PCMK_VALUE_DEFAULT, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_MIGRATION_THRESHOLD " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); rsc->priv->ban_after_failures = PCMK_SCORE_INFINITY; } else { int rc = pcmk_parse_score(value, &(rsc->priv->ban_after_failures), PCMK_SCORE_INFINITY); if ((rc != pcmk_rc_ok) || (rsc->priv->ban_after_failures < 0)) { pcmk__config_warn("Using default (" PCMK_VALUE_INFINITY ") for resource %s meta-attribute " PCMK_META_MIGRATION_THRESHOLD " because '%s' is not a valid value: %s", rsc->id, value, pcmk_rc_str(rc)); rsc->priv->ban_after_failures = PCMK_SCORE_INFINITY; } } } /*! * \internal * \brief Unpack configuration XML for a given resource * * Unpack the XML object containing a resource's configuration into a new * \c pcmk_resource_t object. 
* * \param[in] xml_obj XML node containing the resource's configuration * \param[out] rsc Where to store the unpacked resource information * \param[in] parent Resource's parent, if any * \param[in,out] scheduler Scheduler data * * \return Standard Pacemaker return code * \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and * the caller is responsible for freeing it using its variant-specific * free() method. Otherwise, \p *rsc is guaranteed to be NULL. */ int pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc, pcmk_resource_t *parent, pcmk_scheduler_t *scheduler) { xmlNode *expanded_xml = NULL; xmlNode *ops = NULL; const char *value = NULL; const char *id = NULL; bool guest_node = false; bool remote_node = false; pcmk__resource_private_t *rsc_private = NULL; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .now = NULL, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; CRM_CHECK(rsc != NULL, return EINVAL); CRM_CHECK((xml_obj != NULL) && (scheduler != NULL), *rsc = NULL; return EINVAL); rule_data.now = scheduler->priv->now; crm_log_xml_trace(xml_obj, "[raw XML]"); id = crm_element_value(xml_obj, PCMK_XA_ID); if (id == NULL) { pcmk__config_err("Ignoring <%s> configuration without " PCMK_XA_ID, xml_obj->name); return pcmk_rc_unpack_error; } if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) { return pcmk_rc_unpack_error; } *rsc = calloc(1, sizeof(pcmk_resource_t)); if (*rsc == NULL) { pcmk__sched_err(scheduler, "Unable to allocate memory for resource '%s'", id); return ENOMEM; } (*rsc)->priv = calloc(1, sizeof(pcmk__resource_private_t)); if ((*rsc)->priv == NULL) { pcmk__sched_err(scheduler, "Unable to allocate memory for resource '%s'", id); free(*rsc); return ENOMEM; } rsc_private = (*rsc)->priv; rsc_private->scheduler = scheduler; if (expanded_xml) { crm_log_xml_trace(expanded_xml, "[expanded XML]"); rsc_private->xml = expanded_xml; rsc_private->orig_xml = xml_obj; } else { rsc_private->xml = xml_obj; rsc_private->orig_xml = NULL; } /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */ rsc_private->parent = parent; ops = pcmk__xe_first_child(rsc_private->xml, PCMK_XE_OPERATIONS, NULL, NULL); rsc_private->ops_xml = pcmk__xe_resolve_idref(ops, scheduler->input); rsc_private->variant = get_resource_type((const char *) rsc_private->xml->name); if (rsc_private->variant == pcmk__rsc_variant_unknown) { pcmk__config_err("Ignoring resource '%s' of unknown type '%s'", id, rsc_private->xml->name); common_free(*rsc); *rsc = NULL; return pcmk_rc_unpack_error; } rsc_private->meta = pcmk__strkey_table(free, free); rsc_private->utilization = pcmk__strkey_table(free, free); rsc_private->probed_nodes = pcmk__strkey_table(NULL, free); rsc_private->allowed_nodes = pcmk__strkey_table(NULL, free); value = crm_element_value(rsc_private->xml, PCMK__META_CLONE); if (value) { (*rsc)->id = crm_strdup_printf("%s:%s", id, value); pcmk__insert_meta(rsc_private, PCMK__META_CLONE, value); } else { (*rsc)->id = strdup(id); } warn_about_deprecated_classes(*rsc); rsc_private->fns = &resource_class_functions[rsc_private->variant]; get_meta_attributes(rsc_private->meta, *rsc, NULL, scheduler); (*rsc)->flags = 0; pcmk__set_rsc_flags(*rsc, pcmk__rsc_unassigned); if (!pcmk_is_set(scheduler->flags, pcmk__sched_in_maintenance)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_managed); } rsc_private->orig_role = pcmk_role_stopped; rsc_private->next_role = pcmk_role_unknown; unpack_priority(*rsc); value = g_hash_table_lookup(rsc_private->meta, 
PCMK_META_CRITICAL); if ((value == NULL) || crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_critical); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_NOTIFY); if (crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_notify); } if (xml_contains_remote_node(rsc_private->xml)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_is_remote_connection); if (g_hash_table_lookup(rsc_private->meta, PCMK__META_CONTAINER)) { guest_node = true; } else { remote_node = true; } } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_ALLOW_MIGRATE); if (crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_migratable); } else if ((value == NULL) && remote_node) { /* By default, we want remote nodes to be able * to float around the cluster without having to stop all the * resources within the remote-node before moving. Allowing * migration support enables this feature. If this ever causes * problems, migration support can be explicitly turned off with * PCMK_META_ALLOW_MIGRATE=false. */ pcmk__set_rsc_flags(*rsc, pcmk__rsc_migratable); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_IS_MANAGED); if (value != NULL) { if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) { // @COMPAT Deprecated since 2.1.8 pcmk__config_warn("Support for setting " PCMK_META_IS_MANAGED " to the explicit value '" PCMK_VALUE_DEFAULT "' is deprecated and will be removed in a " "future release (just leave it unset)"); } else if (crm_is_true(value)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_managed); } else { pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed); } } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_MAINTENANCE); if (crm_is_true(value)) { pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(*rsc, pcmk__rsc_maintenance); } if (pcmk_is_set(scheduler->flags, pcmk__sched_in_maintenance)) { pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed); pcmk__set_rsc_flags(*rsc, pcmk__rsc_maintenance); } if (pcmk__is_clone(pe__const_top_resource(*rsc, false))) { if (detect_unique(*rsc)) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_unique); } if (crm_is_true(g_hash_table_lookup((*rsc)->priv->meta, PCMK_META_PROMOTABLE))) { pcmk__set_rsc_flags(*rsc, pcmk__rsc_promotable); } } else { pcmk__set_rsc_flags(*rsc, pcmk__rsc_unique); } // @COMPAT Deprecated meta-attribute value = g_hash_table_lookup(rsc_private->meta, PCMK__META_RESTART_TYPE); if (pcmk__str_eq(value, PCMK_VALUE_RESTART, pcmk__str_casei)) { rsc_private->restart_type = pcmk__restart_restart; pcmk__rsc_trace(*rsc, "%s dependency restart handling: restart", (*rsc)->id); pcmk__warn_once(pcmk__wo_restart_type, "Support for " PCMK__META_RESTART_TYPE " is deprecated " "and will be removed in a future release"); } else { rsc_private->restart_type = pcmk__restart_ignore; pcmk__rsc_trace(*rsc, "%s dependency restart handling: ignore", (*rsc)->id); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_MULTIPLE_ACTIVE); if (pcmk__str_eq(value, PCMK_VALUE_STOP_ONLY, pcmk__str_casei)) { rsc_private->multiply_active_policy = pcmk__multiply_active_stop; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: stop only", (*rsc)->id); } else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) { rsc_private->multiply_active_policy = pcmk__multiply_active_block; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: block", (*rsc)->id); } else if (pcmk__str_eq(value, PCMK_VALUE_STOP_UNEXPECTED, pcmk__str_casei)) { rsc_private->multiply_active_policy = pcmk__multiply_active_unexpected; pcmk__rsc_trace(*rsc, "%s multiple running 
resource recovery: " "stop unexpected instances", (*rsc)->id); } else { // PCMK_VALUE_STOP_START if (!pcmk__str_eq(value, PCMK_VALUE_STOP_START, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_warn("%s is not a valid value for " PCMK_META_MULTIPLE_ACTIVE ", using default of " "\"" PCMK_VALUE_STOP_START "\"", value); } rsc_private->multiply_active_policy = pcmk__multiply_active_restart; pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: stop/start", (*rsc)->id); } unpack_stickiness(*rsc); unpack_migration_threshold(*rsc); if (pcmk__str_eq(crm_element_value(rsc_private->xml, PCMK_XA_CLASS), PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_fencing); pcmk__set_rsc_flags(*rsc, pcmk__rsc_fence_device); } value = g_hash_table_lookup(rsc_private->meta, PCMK_META_REQUIRES); unpack_requires(*rsc, value, false); value = g_hash_table_lookup(rsc_private->meta, PCMK_META_FAILURE_TIMEOUT); if (value != NULL) { pcmk_parse_interval_spec(value, &(rsc_private->failure_expiration_ms)); } if (remote_node) { GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler); /* Grabbing the value now means that any rules based on node attributes * will evaluate to false, so such rules should not be used with * PCMK_REMOTE_RA_RECONNECT_INTERVAL. * * @TODO Evaluate per node before using */ value = g_hash_table_lookup(params, PCMK_REMOTE_RA_RECONNECT_INTERVAL); if (value) { /* reconnect delay works by setting failure_timeout and preventing the * connection from starting until the failure is cleared. */ pcmk_parse_interval_spec(value, &(rsc_private->remote_reconnect_ms)); /* We want to override any default failure_timeout in use when remote * PCMK_REMOTE_RA_RECONNECT_INTERVAL is in use. */ rsc_private->failure_expiration_ms = rsc_private->remote_reconnect_ms; } } get_target_role(*rsc, &(rsc_private->next_role)); pcmk__rsc_trace(*rsc, "%s desired next state: %s", (*rsc)->id, (rsc_private->next_role == pcmk_role_unknown)? "default" : pcmk_role_text(rsc_private->next_role)); if (rsc_private->fns->unpack(*rsc, scheduler) == FALSE) { rsc_private->fns->free(*rsc); *rsc = NULL; return pcmk_rc_unpack_error; } if (pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) { // This tag must stay exactly the same because it is tested elsewhere resource_location(*rsc, NULL, 0, "symmetric_default", scheduler); } else if (guest_node) { /* remote resources tied to a container resource must always be allowed * to opt-in to the cluster. Whether the connection resource is actually * allowed to be placed on a node is dependent on the container resource */ resource_location(*rsc, NULL, 0, "remote_connection_default", scheduler); } pcmk__rsc_trace(*rsc, "%s action notification: %s", (*rsc)->id, pcmk_is_set((*rsc)->flags, pcmk__rsc_notify)? 
"required" : "not required"); pe__unpack_dataset_nvpairs(rsc_private->xml, PCMK_XE_UTILIZATION, &rule_data, rsc_private->utilization, NULL, scheduler); if (expanded_xml) { if (add_template_rsc(xml_obj, scheduler) == FALSE) { rsc_private->fns->free(*rsc); *rsc = NULL; return pcmk_rc_unpack_error; } } return pcmk_rc_ok; } gboolean is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc) { pcmk_resource_t *parent = child; if (parent == NULL || rsc == NULL) { return FALSE; } while (parent->priv->parent != NULL) { if (parent->priv->parent == rsc) { return TRUE; } parent = parent->priv->parent; } return FALSE; } pcmk_resource_t * uber_parent(pcmk_resource_t *rsc) { pcmk_resource_t *parent = rsc; if (parent == NULL) { return NULL; } while ((parent->priv->parent != NULL) && !pcmk__is_bundle(parent->priv->parent)) { parent = parent->priv->parent; } return parent; } /*! * \internal * \brief Get the topmost parent of a resource as a const pointer * * \param[in] rsc Resource to check * \param[in] include_bundle If true, go all the way to bundle * * \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent, * the bundle if \p rsc is bundled and \p include_bundle is true, * otherwise the topmost parent of \p rsc up to a clone */ const pcmk_resource_t * pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle) { const pcmk_resource_t *parent = rsc; if (parent == NULL) { return NULL; } while (parent->priv->parent != NULL) { if (!include_bundle && pcmk__is_bundle(parent->priv->parent)) { break; } parent = parent->priv->parent; } return parent; } void common_free(pcmk_resource_t * rsc) { if (rsc == NULL) { return; } pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); if (rsc->priv->parameter_cache != NULL) { g_hash_table_destroy(rsc->priv->parameter_cache); } if ((rsc->priv->parent == NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { pcmk__xml_free(rsc->priv->xml); rsc->priv->xml = NULL; pcmk__xml_free(rsc->priv->orig_xml); rsc->priv->orig_xml = NULL; } else if (rsc->priv->orig_xml != NULL) { // rsc->private->xml was expanded from a template pcmk__xml_free(rsc->priv->xml); rsc->priv->xml = NULL; } free(rsc->id); free(rsc->priv->variant_opaque); free(rsc->priv->history_id); free(rsc->priv->pending_action); free(rsc->priv->assigned_node); g_list_free(rsc->priv->actions); g_list_free(rsc->priv->active_nodes); g_list_free(rsc->priv->launched); g_list_free(rsc->priv->dangling_migration_sources); g_list_free(rsc->priv->with_this_colocations); g_list_free(rsc->priv->this_with_colocations); g_list_free(rsc->priv->location_constraints); g_list_free(rsc->priv->ticket_constraints); if (rsc->priv->meta != NULL) { g_hash_table_destroy(rsc->priv->meta); } if (rsc->priv->utilization != NULL) { g_hash_table_destroy(rsc->priv->utilization); } if (rsc->priv->probed_nodes != NULL) { g_hash_table_destroy(rsc->priv->probed_nodes); } if (rsc->priv->allowed_nodes != NULL) { g_hash_table_destroy(rsc->priv->allowed_nodes); } free(rsc->priv); free(rsc); } /*! 
* \internal * \brief Count a node and update most preferred to it as appropriate * * \param[in] rsc An active resource * \param[in] node A node that \p rsc is active on * \param[in,out] active This will be set to \p node if \p node is more * preferred than the current value * \param[in,out] count_all If not NULL, this will be incremented * \param[in,out] count_clean If not NULL, this will be incremented if \p node * is online and clean * * \return true if the count should continue, or false if sufficiently known */ bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_node_t **active, unsigned int *count_all, unsigned int *count_clean) { bool keep_looking = false; bool is_happy = false; CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL), return false); is_happy = node->details->online && !node->details->unclean; if (count_all != NULL) { ++*count_all; } if ((count_clean != NULL) && is_happy) { ++*count_clean; } if ((count_all != NULL) || (count_clean != NULL)) { keep_looking = true; // We're counting, so go through entire list } if (rsc->priv->partial_migration_source != NULL) { if (pcmk__same_node(node, rsc->priv->partial_migration_source)) { *active = node; // This is the migration source } else { keep_looking = true; } } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) { if (is_happy && ((*active == NULL) || !(*active)->details->online || (*active)->details->unclean)) { *active = node; // This is the first clean node } else { keep_looking = true; } } if (*active == NULL) { *active = node; // This is the first node checked } return keep_looking; } // Shared implementation of pcmk__rsc_methods_t:active_node() static pcmk_node_t * active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pcmk_node_t *active = NULL; if (count_all != NULL) { *count_all = 0; } if (count_clean != NULL) { *count_clean = 0; } if (rsc == NULL) { return NULL; } for (GList *iter = rsc->priv->active_nodes; iter != NULL; iter = iter->next) { if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active, count_all, count_clean)) { break; // Don't waste time iterating if we don't have to } } return active; } /*! * \brief * \internal Find and count active nodes according to \c PCMK_META_REQUIRES * * \param[in] rsc Resource to check * \param[out] count If not NULL, will be set to count of active nodes * * \return An active node (or NULL if resource is not active anywhere) * * \note This is a convenience wrapper for active_node() where the count of all * active nodes or only clean active nodes is desired according to the * \c PCMK_META_REQUIRES meta-attribute. 
*/ pcmk_node_t * pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count) { if (rsc == NULL) { if (count != NULL) { *count = 0; } return NULL; } if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) { return rsc->priv->fns->active_node(rsc, count, NULL); } else { return rsc->priv->fns->active_node(rsc, NULL, count); } } void pe__count_common(pcmk_resource_t *rsc) { if (rsc->priv->children != NULL) { for (GList *item = rsc->priv->children; item != NULL; item = item->next) { pcmk_resource_t *child = item->data; child->priv->fns->count(item->data); } } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed) || (rsc->priv->orig_role > pcmk_role_stopped)) { rsc->priv->scheduler->priv->ninstances++; if (pe__resource_is_disabled(rsc)) { rsc->priv->scheduler->priv->disabled_resources++; } if (pcmk_is_set(rsc->flags, pcmk__rsc_blocked)) { rsc->priv->scheduler->priv->blocked_resources++; } } } /*! * \internal * \brief Update a resource's next role * * \param[in,out] rsc Resource to be updated * \param[in] role Resource's new next role * \param[in] why Human-friendly reason why role is changing (for logs) */ void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why) { CRM_ASSERT((rsc != NULL) && (why != NULL)); if (rsc->priv->next_role != role) { pcmk__rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)", rsc->id, pcmk_role_text(rsc->priv->next_role), pcmk_role_text(role), why); rsc->priv->next_role = role; } } diff --git a/lib/services/Makefile.am b/lib/services/Makefile.am index aa630c7802..d4af2f7b03 100644 --- a/lib/services/Makefile.am +++ b/lib/services/Makefile.am @@ -1,44 +1,41 @@ # # Copyright 2012-2024 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU Lesser General Public License # version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. # MAINTAINERCLEANFILES = Makefile.in AM_CPPFLAGS = -I$(top_srcdir)/include lib_LTLIBRARIES = libcrmservice.la noinst_HEADERS = $(wildcard *.h) libcrmservice_la_LDFLAGS = -version-info 32:1:4 libcrmservice_la_CFLAGS = libcrmservice_la_CFLAGS += $(CFLAGS_HARDENED_LIB) libcrmservice_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) libcrmservice_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(DBUS_LIBS) ## Library sources (*must* use += format for bumplibs) libcrmservice_la_SOURCES = services.c libcrmservice_la_SOURCES += services_linux.c libcrmservice_la_SOURCES += services_ocf.c if BUILD_LSB libcrmservice_la_SOURCES += services_lsb.c endif if BUILD_DBUS libcrmservice_la_SOURCES += dbus.c endif -if BUILD_UPSTART -libcrmservice_la_SOURCES += upstart.c -endif if BUILD_SYSTEMD libcrmservice_la_SOURCES += systemd.c endif if BUILD_NAGIOS libcrmservice_la_SOURCES += services_nagios.c endif diff --git a/lib/services/services.c b/lib/services/services.c index 1733ce3da1..5823b132d6 100644 --- a/lib/services/services.c +++ b/lib/services/services.c @@ -1,1464 +1,1400 @@ /* * Copyright 2010-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "services_private.h" #include "services_ocf.h" #if PCMK__ENABLE_LSB #include "services_lsb.h" #endif -#if SUPPORT_UPSTART -# include -#endif - #if SUPPORT_SYSTEMD # include #endif #if SUPPORT_NAGIOS # include #endif /* TODO: Develop a rollover strategy */ static int operations = 0; static GHashTable *recurring_actions = NULL; /* ops waiting to run async because of conflicting active * pending ops */ static GList *blocked_ops = NULL; /* ops currently active (in-flight) */ static GList *inflight_ops = NULL; static void handle_blocked_ops(void); /*! * \brief Find first service class that can provide a specified agent * * \param[in] agent Name of agent to search for * * \return Service class if found, NULL otherwise * - * \note The priority is LSB, then systemd, then upstart. It would be preferable - * to put systemd first, but LSB merely requires a file existence check, - * while systemd requires contacting D-Bus. + * \note The priority is LSB then systemd. It would be preferable to put systemd + * first, but LSB merely requires a file existence check, while systemd + * requires contacting DBus. */ const char * resources_find_service_class(const char *agent) { #if PCMK__ENABLE_LSB if (services__lsb_agent_exists(agent)) { return PCMK_RESOURCE_CLASS_LSB; } #endif #if SUPPORT_SYSTEMD if (systemd_unit_exists(agent)) { return PCMK_RESOURCE_CLASS_SYSTEMD; } #endif -#if SUPPORT_UPSTART - if (upstart_job_exists(agent)) { - return PCMK_RESOURCE_CLASS_UPSTART; - } -#endif return NULL; } static inline void init_recurring_actions(void) { if (recurring_actions == NULL) { recurring_actions = pcmk__strkey_table(NULL, NULL); } } /*! * \internal - * \brief Check whether op is in-flight systemd or upstart op + * \brief Check whether op is in-flight systemd op * * \param[in] op Operation to check * - * \return TRUE if op is in-flight systemd or upstart op + * \return TRUE if op is in-flight systemd op */ static inline gboolean -inflight_systemd_or_upstart(const svc_action_t *op) +inflight_systemd(const svc_action_t *op) { - return pcmk__strcase_any_of(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD, - PCMK_RESOURCE_CLASS_UPSTART, NULL) && - g_list_find(inflight_ops, op) != NULL; + return pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD, + pcmk__str_casei) + && (g_list_find(inflight_ops, op) != NULL); } /*! * \internal * \brief Expand "service" alias to an actual resource class * * \param[in] rsc Resource name (for logging only) * \param[in] standard Resource class as configured * \param[in] agent Agent name to look for * * \return Newly allocated string with actual resource class * * \note The caller is responsible for calling free() on the result. 
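A minimal standalone sketch of the class-resolution order described in the note above: cheap checks run before expensive ones, so an LSB-style file-existence test is tried before a systemd lookup. The helper names (lsb_script_exists, systemd_unit_known) are stand-ins for illustration only, not the real Pacemaker or systemd APIs.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the LSB check: just a file-existence test under /etc/init.d */
static bool
lsb_script_exists(const char *agent)
{
    char path[256];

    snprintf(path, sizeof(path), "/etc/init.d/%s", agent);
    return access(path, F_OK) == 0;
}

/* Stand-in for the systemd check; the real one has to contact D-Bus */
static bool
systemd_unit_known(const char *agent)
{
    (void) agent;
    return false;
}

/* The first standard that recognizes the agent wins; NULL means none did */
static const char *
find_service_class_sketch(const char *agent)
{
    if (lsb_script_exists(agent)) {
        return "lsb";
    }
    if (systemd_unit_known(agent)) {
        return "systemd";
    }
    return NULL;
}

int
main(int argc, char **argv)
{
    const char *agent = (argc > 1)? argv[1] : "networking";
    const char *class = find_service_class_sketch(agent);

    printf("%s -> %s\n", agent, (class != NULL)? class : "no class found");
    return 0;
}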
*/ static char * expand_resource_class(const char *rsc, const char *standard, const char *agent) { char *expanded_class = NULL; #if PCMK__ENABLE_SERVICE if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0) { const char *found_class = resources_find_service_class(agent); if (found_class != NULL) { crm_debug("Found %s agent %s for %s", found_class, agent, rsc); expanded_class = pcmk__str_copy(found_class); } else { const char *default_standard = NULL; #if PCMK__ENABLE_LSB default_standard = PCMK_RESOURCE_CLASS_LSB; #elif SUPPORT_SYSTEMD default_standard = PCMK_RESOURCE_CLASS_SYSTEMD; -#elif SUPPORT_UPSTART - default_standard = PCMK_RESOURCE_CLASS_UPSTART; #else #error No standards supported for service alias (configure script bug) #endif crm_info("Assuming resource class %s for agent %s for %s", default_standard, agent, rsc); expanded_class = pcmk__str_copy(default_standard); } } #endif if (expanded_class == NULL) { expanded_class = pcmk__str_copy(standard); } return expanded_class; } /*! * \internal * \brief Create a simple svc_action_t instance * * \return Newly allocated instance (or NULL if not enough memory) */ static svc_action_t * new_action(void) { svc_action_t *op = calloc(1, sizeof(svc_action_t)); if (op == NULL) { return NULL; } op->opaque = calloc(1, sizeof(svc_action_private_t)); if (op->opaque == NULL) { free(op); return NULL; } // Initialize result services__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_UNKNOWN, NULL); return op; } static bool required_argument_missing(uint32_t ra_caps, const char *name, const char *standard, const char *provider, const char *agent, const char *action) { if (pcmk__str_empty(name)) { crm_info("Cannot create operation without resource name (bug?)"); return true; } if (pcmk__str_empty(standard)) { crm_info("Cannot create operation for %s without resource class (bug?)", name); return true; } if (pcmk_is_set(ra_caps, pcmk_ra_cap_provider) && pcmk__str_empty(provider)) { crm_info("Cannot create operation for %s resource %s " "without provider (bug?)", standard, name); return true; } if (pcmk__str_empty(agent)) { crm_info("Cannot create operation for %s without agent name (bug?)", name); return true; } if (pcmk__str_empty(action)) { crm_info("Cannot create operation for %s without action name (bug?)", name); return true; } return false; } // \return Standard Pacemaker return code (pcmk_rc_ok or ENOMEM) static int copy_action_arguments(svc_action_t *op, uint32_t ra_caps, const char *name, const char *standard, const char *provider, const char *agent, const char *action) { op->rsc = strdup(name); if (op->rsc == NULL) { return ENOMEM; } op->agent = strdup(agent); if (op->agent == NULL) { return ENOMEM; } op->standard = expand_resource_class(name, standard, agent); if (op->standard == NULL) { return ENOMEM; } if (pcmk_is_set(ra_caps, pcmk_ra_cap_status) && pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { action = PCMK_ACTION_STATUS; } op->action = strdup(action); if (op->action == NULL) { return ENOMEM; } if (pcmk_is_set(ra_caps, pcmk_ra_cap_provider)) { op->provider = strdup(provider); if (op->provider == NULL) { return ENOMEM; } } return pcmk_rc_ok; } svc_action_t * services__create_resource_action(const char *name, const char *standard, const char *provider, const char *agent, const char *action, guint interval_ms, int timeout, GHashTable *params, enum svc_action_flags flags) { svc_action_t *op = NULL; uint32_t ra_caps = pcmk_get_ra_caps(standard); int rc = pcmk_rc_ok; op = new_action(); if (op == NULL) { crm_crit("Cannot prepare 
action: %s", strerror(ENOMEM)); if (params != NULL) { g_hash_table_destroy(params); } return NULL; } op->interval_ms = interval_ms; op->timeout = timeout; op->flags = flags; op->sequence = ++operations; // Take ownership of params if (pcmk_is_set(ra_caps, pcmk_ra_cap_params)) { op->params = params; } else if (params != NULL) { g_hash_table_destroy(params); params = NULL; } if (required_argument_missing(ra_caps, name, standard, provider, agent, action)) { services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_FATAL, "Required agent or action information missing"); return op; } op->id = pcmk__op_key(name, action, interval_ms); if (copy_action_arguments(op, ra_caps, name, standard, provider, agent, action) != pcmk_rc_ok) { crm_crit("Cannot prepare %s action for %s: %s", action, name, strerror(ENOMEM)); services__handle_exec_error(op, ENOMEM); return op; } if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_OCF) == 0) { rc = services__ocf_prepare(op); #if PCMK__ENABLE_LSB } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_LSB) == 0) { rc = services__lsb_prepare(op); #endif #if SUPPORT_SYSTEMD } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { rc = services__systemd_prepare(op); #endif -#if SUPPORT_UPSTART - } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_UPSTART) == 0) { - rc = services__upstart_prepare(op); -#endif #if SUPPORT_NAGIOS } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_NAGIOS) == 0) { rc = services__nagios_prepare(op); #endif } else { crm_info("Unknown resource standard: %s", op->standard); rc = ENOENT; } if (rc != pcmk_rc_ok) { crm_info("Cannot prepare %s operation for %s: %s", action, name, strerror(rc)); services__handle_exec_error(op, rc); } return op; } svc_action_t * resources_action_create(const char *name, const char *standard, const char *provider, const char *agent, const char *action, guint interval_ms, int timeout, GHashTable *params, enum svc_action_flags flags) { svc_action_t *op = services__create_resource_action(name, standard, provider, agent, action, interval_ms, timeout, params, flags); if (op == NULL || op->rc != 0) { services_action_free(op); return NULL; } else { // Preserve public API backward compatibility op->rc = PCMK_OCF_OK; op->status = PCMK_EXEC_DONE; return op; } } svc_action_t * services_action_create_generic(const char *exec, const char *args[]) { svc_action_t *op = new_action(); pcmk__mem_assert(op); op->opaque->exec = strdup(exec); op->opaque->args[0] = strdup(exec); if ((op->opaque->exec == NULL) || (op->opaque->args[0] == NULL)) { crm_crit("Cannot prepare action for '%s': %s", exec, strerror(ENOMEM)); services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, strerror(ENOMEM)); return op; } if (args == NULL) { return op; } for (int cur_arg = 1; args[cur_arg - 1] != NULL; cur_arg++) { if (cur_arg == PCMK__NELEM(op->opaque->args)) { crm_info("Cannot prepare action for '%s': Too many arguments", exec); services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR_HARD, "Too many arguments"); break; } op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]); if (op->opaque->args[cur_arg] == NULL) { crm_crit("Cannot prepare action for '%s': %s", exec, strerror(ENOMEM)); services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, strerror(ENOMEM)); break; } } return op; } /*! 
* \brief Create an alert agent action * * \param[in] id Alert ID * \param[in] exec Path to alert agent executable * \param[in] timeout Action timeout * \param[in] params Parameters to use with action * \param[in] sequence Action sequence number * \param[in] cb_data Data to pass to callback function * * \return New action on success, NULL on error * \note It is the caller's responsibility to free cb_data. * The caller should not free params explicitly. */ svc_action_t * services_alert_create(const char *id, const char *exec, int timeout, GHashTable *params, int sequence, void *cb_data) { svc_action_t *action = services_action_create_generic(exec, NULL); action->id = pcmk__str_copy(id); action->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_ALERT); action->timeout = timeout; action->params = params; action->sequence = sequence; action->cb_data = cb_data; return action; } /*! * \brief Set the user and group that an action will execute as * * \param[in,out] op Action to modify * \param[in] user Name of user to execute action as * \param[in] group Name of group to execute action as * * \return pcmk_ok on success, -errno otherwise * * \note This will have no effect unless the process executing the action runs - * as root, and the action is not a systemd or upstart action. - * We could implement this for systemd by adding User= and Group= to - * [Service] in the override file, but that seems more likely to cause - * problems than be useful. + * as root and the action is not a systemd action. We could implement this + * for systemd by adding User= and Group= to [Service] in the override + * file, but that seems more likely to cause problems than be useful. */ int services_action_user(svc_action_t *op, const char *user) { CRM_CHECK((op != NULL) && (user != NULL), return -EINVAL); return crm_user_lookup(user, &(op->opaque->uid), &(op->opaque->gid)); } /*! * \brief Execute an alert agent action * * \param[in,out] action Action to execute * \param[in] cb Function to call when action completes * * \return TRUE if the library will free action, FALSE otherwise * * \note If this function returns FALSE, it is the caller's responsibility to * free the action with services_action_free(). However, unless someone * intentionally creates a recurring alert action, this will never return * FALSE. */ gboolean services_alert_async(svc_action_t *action, void (*cb)(svc_action_t *op)) { action->synchronous = false; action->opaque->callback = cb; return services__execute_file(action) == pcmk_rc_ok; } #if HAVE_DBUS /*! 
* \internal * \brief Update operation's pending DBus call, unreferencing old one if needed * * \param[in,out] op Operation to modify * \param[in] pending Pending call to set */ void services_set_op_pending(svc_action_t *op, DBusPendingCall *pending) { if (op->opaque->pending && (op->opaque->pending != pending)) { if (pending) { crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending); } else { crm_trace("Done with pending %s DBus call (%p)", op->id, op->opaque->pending); } dbus_pending_call_unref(op->opaque->pending); } op->opaque->pending = pending; if (pending) { crm_trace("Updated pending %s DBus call (%p)", op->id, pending); } else { crm_trace("Cleared pending %s DBus call", op->id); } } #endif void services_action_cleanup(svc_action_t * op) { if ((op == NULL) || (op->opaque == NULL)) { return; } #if HAVE_DBUS if(op->opaque->timerid != 0) { crm_trace("Removing timer for call %s to %s", op->action, op->rsc); g_source_remove(op->opaque->timerid); op->opaque->timerid = 0; } if(op->opaque->pending) { if (dbus_pending_call_get_completed(op->opaque->pending)) { // This should never be the case crm_warn("Result of %s op %s was unhandled", op->standard, op->id); } else { crm_debug("Will ignore any result of canceled %s op %s", op->standard, op->id); } dbus_pending_call_cancel(op->opaque->pending); services_set_op_pending(op, NULL); } #endif if (op->opaque->stderr_gsource) { mainloop_del_fd(op->opaque->stderr_gsource); op->opaque->stderr_gsource = NULL; } if (op->opaque->stdout_gsource) { mainloop_del_fd(op->opaque->stdout_gsource); op->opaque->stdout_gsource = NULL; } } /*! * \internal * \brief Map an actual resource action result to a standard OCF result * * \param[in] standard Agent standard (must not be "service") * \param[in] action Action that result is for * \param[in] exit_status Actual agent exit status * * \return Standard OCF result */ enum ocf_exitcode services_result2ocf(const char *standard, const char *action, int exit_status) { if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) { return services__ocf2ocf(exit_status); #if SUPPORT_SYSTEMD } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { return services__systemd2ocf(exit_status); #endif -#if SUPPORT_UPSTART - } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_UPSTART, - pcmk__str_casei)) { - return services__upstart2ocf(exit_status); -#endif - #if SUPPORT_NAGIOS } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) { return services__nagios2ocf(exit_status); #endif #if PCMK__ENABLE_LSB } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) { return services__lsb2ocf(action, exit_status); #endif } else { crm_warn("Treating result from unknown standard '%s' as OCF", ((standard == NULL)? "unspecified" : standard)); return services__ocf2ocf(exit_status); } } void services_action_free(svc_action_t * op) { unsigned int i; if (op == NULL) { return; } /* The operation should be removed from all tracking lists by this point. * If it's not, we have a bug somewhere, so bail. That may lead to a * memory leak, but it's better than a use-after-free segmentation fault. 
*/ CRM_CHECK(g_list_find(inflight_ops, op) == NULL, return); CRM_CHECK(g_list_find(blocked_ops, op) == NULL, return); CRM_CHECK((recurring_actions == NULL) || (g_hash_table_lookup(recurring_actions, op->id) == NULL), return); services_action_cleanup(op); if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } free(op->id); free(op->opaque->exec); for (i = 0; i < PCMK__NELEM(op->opaque->args); i++) { free(op->opaque->args[i]); } free(op->opaque->exit_reason); free(op->opaque); free(op->rsc); free(op->action); free(op->standard); free(op->agent); free(op->provider); free(op->stdout_data); free(op->stderr_data); if (op->params) { g_hash_table_destroy(op->params); op->params = NULL; } free(op); } gboolean cancel_recurring_action(svc_action_t * op) { crm_info("Cancelling %s operation %s", op->standard, op->id); if (recurring_actions) { g_hash_table_remove(recurring_actions, op->id); } if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } return TRUE; } /*! * \brief Cancel a recurring action * * \param[in] name Name of resource that operation is for * \param[in] action Name of operation to cancel * \param[in] interval_ms Interval of operation to cancel * * \return TRUE if action was successfully cancelled, FALSE otherwise */ gboolean services_action_cancel(const char *name, const char *action, guint interval_ms) { gboolean cancelled = FALSE; char *id = pcmk__op_key(name, action, interval_ms); svc_action_t *op = NULL; /* We can only cancel a recurring action */ init_recurring_actions(); op = g_hash_table_lookup(recurring_actions, id); if (op == NULL) { goto done; } // Tell services__finalize_async_op() not to reschedule the operation op->cancel = TRUE; /* Stop tracking it as a recurring operation, and stop its repeat timer */ cancel_recurring_action(op); /* If the op has a PID, it's an in-flight child process, so kill it. * * Whether the kill succeeds or fails, the main loop will send the op to * async_action_complete() (and thus services__finalize_async_op()) when the * process goes away. */ if (op->pid != 0) { crm_info("Terminating in-flight op %s[%d] early because it was cancelled", id, op->pid); cancelled = mainloop_child_kill(op->pid); if (cancelled == FALSE) { crm_err("Termination of %s[%d] failed", id, op->pid); } goto done; } #if HAVE_DBUS - // In-flight systemd and upstart ops don't have a pid - if (inflight_systemd_or_upstart(op)) { + // In-flight systemd ops don't have a pid + if (inflight_systemd(op)) { inflight_ops = g_list_remove(inflight_ops, op); /* This will cause any result that comes in later to be discarded, so we * don't call the callback and free the operation twice. */ services_action_cleanup(op); } #endif /* The rest of this is essentially equivalent to * services__finalize_async_op(), minus the handle_blocked_ops() call. 
*/ // Report operation as cancelled services__set_cancelled(op); if (op->opaque->callback) { op->opaque->callback(op); } blocked_ops = g_list_remove(blocked_ops, op); services_action_free(op); cancelled = TRUE; // @TODO Initiate handle_blocked_ops() asynchronously done: free(id); return cancelled; } gboolean services_action_kick(const char *name, const char *action, guint interval_ms) { svc_action_t * op = NULL; char *id = pcmk__op_key(name, action, interval_ms); init_recurring_actions(); op = g_hash_table_lookup(recurring_actions, id); free(id); if (op == NULL) { return FALSE; } - if (op->pid || inflight_systemd_or_upstart(op)) { + if (op->pid || inflight_systemd(op)) { return TRUE; } else { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } recurring_action_timer(op); return TRUE; } } /*! * \internal * \brief Add a new recurring operation, checking for duplicates * * \param[in,out] op Operation to add * * \return TRUE if duplicate found (and reschedule), FALSE otherwise */ static gboolean handle_duplicate_recurring(svc_action_t *op) { svc_action_t * dup = NULL; /* check for duplicates */ dup = g_hash_table_lookup(recurring_actions, op->id); if (dup && (dup != op)) { /* update user data */ if (op->opaque->callback) { dup->opaque->callback = op->opaque->callback; dup->cb_data = op->cb_data; op->cb_data = NULL; } /* immediately execute the next interval */ if (dup->pid != 0) { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } recurring_action_timer(dup); } /* free the duplicate */ services_action_free(op); return TRUE; } return FALSE; } /*! * \internal * \brief Execute an action appropriately according to its standard * * \param[in,out] op Action to execute * * \return Standard Pacemaker return code * \retval EBUSY Recurring operation could not be initiated * \retval pcmk_rc_error Synchronous action failed * \retval pcmk_rc_ok Synchronous action succeeded, or asynchronous action * should not be freed (because it's pending or because * it failed to execute and was already freed) * * \note If the return value for an asynchronous action is not pcmk_rc_ok, the * caller is responsible for freeing the action. */ static int execute_action(svc_action_t *op) { -#if SUPPORT_UPSTART - if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_UPSTART, - pcmk__str_casei)) { - return services__execute_upstart(op); - } -#endif - #if SUPPORT_SYSTEMD if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { return services__execute_systemd(op); } #endif return services__execute_file(op); } void services_add_inflight_op(svc_action_t * op) { if (op == NULL) { return; } CRM_ASSERT(op->synchronous == FALSE); /* keep track of ops that are in-flight to avoid collisions in the same namespace */ if (op->rsc) { inflight_ops = g_list_append(inflight_ops, op); } } /*! 
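A simplified, standalone sketch of the recurring-action registry pattern used by services_action_cancel() and services_action_kick() above: actions are tracked in a GLib hash table keyed by an operation ID, and cancelling is a lookup followed by removal. The key format below is illustrative only, not the real pcmk__op_key() output.

#include <glib.h>
#include <stdio.h>

int
main(void)
{
    /* Keys are owned by the table and freed with g_free() on removal */
    GHashTable *recurring = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                  g_free, NULL);
    char *id = g_strdup_printf("%s_%s_%u", "myrsc", "monitor", 10000U);

    g_hash_table_replace(recurring, id, (gpointer) "op data");

    if (g_hash_table_remove(recurring, "myrsc_monitor_10000")) {
        printf("recurring op cancelled\n");
    }
    g_hash_table_destroy(recurring);
    return 0;
}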
* \internal * \brief Stop tracking an operation that completed * * \param[in] op Operation to stop tracking */ void services_untrack_op(const svc_action_t *op) { /* Op is no longer in-flight or blocked */ inflight_ops = g_list_remove(inflight_ops, op); blocked_ops = g_list_remove(blocked_ops, op); /* Op is no longer blocking other ops, so check if any need to run */ handle_blocked_ops(); } gboolean services_action_async_fork_notify(svc_action_t * op, void (*action_callback) (svc_action_t *), void (*action_fork_callback) (svc_action_t *)) { CRM_CHECK(op != NULL, return TRUE); op->synchronous = false; if (action_callback != NULL) { op->opaque->callback = action_callback; } if (action_fork_callback != NULL) { op->opaque->fork_callback = action_fork_callback; } if (op->interval_ms > 0) { init_recurring_actions(); if (handle_duplicate_recurring(op)) { /* entry rescheduled, dup freed */ /* exit early */ return TRUE; } g_hash_table_replace(recurring_actions, op->id, op); } if (!pcmk_is_set(op->flags, SVC_ACTION_NON_BLOCKED) && op->rsc && is_op_blocked(op->rsc)) { blocked_ops = g_list_append(blocked_ops, op); return TRUE; } return execute_action(op) == pcmk_rc_ok; } gboolean services_action_async(svc_action_t * op, void (*action_callback) (svc_action_t *)) { return services_action_async_fork_notify(op, action_callback, NULL); } static gboolean processing_blocked_ops = FALSE; gboolean is_op_blocked(const char *rsc) { GList *gIter = NULL; svc_action_t *op = NULL; for (gIter = inflight_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; if (pcmk__str_eq(op->rsc, rsc, pcmk__str_casei)) { return TRUE; } } return FALSE; } static void handle_blocked_ops(void) { GList *executed_ops = NULL; GList *gIter = NULL; svc_action_t *op = NULL; if (processing_blocked_ops) { /* avoid nested calling of this function */ return; } processing_blocked_ops = TRUE; /* n^2 operation here, but blocked ops are incredibly rare. this list * will be empty 99% of the time. */ for (gIter = blocked_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; if (is_op_blocked(op->rsc)) { continue; } executed_ops = g_list_append(executed_ops, op); if (execute_action(op) != pcmk_rc_ok) { /* this can cause this function to be called recursively * which is why we have processing_blocked_ops static variable */ services__finalize_async_op(op); } } for (gIter = executed_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; blocked_ops = g_list_remove(blocked_ops, op); } g_list_free(executed_ops); processing_blocked_ops = FALSE; } /*! 
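A standalone sketch of the per-resource serialization idea behind is_op_blocked() and handle_blocked_ops() above: an action is blocked while another action for the same resource name is still in flight, and only runs once that resource leaves the in-flight list. sketch_op_t and its helpers are stand-ins for illustration, not the real svc_action_t API.

#include <glib.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *rsc;
    const char *action;
} sketch_op_t;

static GList *inflight = NULL;

/* Mirror of the is_op_blocked() idea: any in-flight op for this resource? */
static gboolean
rsc_is_busy(const char *rsc)
{
    for (GList *iter = inflight; iter != NULL; iter = iter->next) {
        sketch_op_t *op = iter->data;

        if (strcmp(op->rsc, rsc) == 0) {
            return TRUE;
        }
    }
    return FALSE;
}

int
main(void)
{
    sketch_op_t start = { "db", "start" };
    sketch_op_t monitor = { "db", "monitor" };

    inflight = g_list_append(inflight, &start);

    /* The monitor must wait: "db" already has an action in flight */
    printf("monitor %s\n", rsc_is_busy(monitor.rsc)? "blocked" : "runs now");

    inflight = g_list_remove(inflight, &start);
    printf("monitor %s\n", rsc_is_busy(monitor.rsc)? "blocked" : "runs now");

    return 0;
}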
* \internal * \brief Execute a meta-data action appropriately to standard * * \param[in,out] op Meta-data action to execute * * \return Standard Pacemaker return code */ static int execute_metadata_action(svc_action_t *op) { const char *class = op->standard; if (op->agent == NULL) { crm_info("Meta-data requested without specifying agent"); services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_FATAL, "Agent not specified"); return EINVAL; } if (class == NULL) { crm_info("Meta-data requested for agent %s without specifying class", op->agent); services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_FATAL, "Agent standard not specified"); return EINVAL; } #if PCMK__ENABLE_SERVICE if (!strcmp(class, PCMK_RESOURCE_CLASS_SERVICE)) { class = resources_find_service_class(op->agent); } if (class == NULL) { crm_info("Meta-data requested for %s, but could not determine class", op->agent); services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_HARD, "Agent standard could not be determined"); return EINVAL; } #endif #if PCMK__ENABLE_LSB if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) { return pcmk_legacy2rc(services__get_lsb_metadata(op->agent, &op->stdout_data)); } #endif #if SUPPORT_NAGIOS if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) { return pcmk_legacy2rc(services__get_nagios_metadata(op->agent, &op->stdout_data)); } #endif return execute_action(op); } gboolean services_action_sync(svc_action_t * op) { gboolean rc = TRUE; if (op == NULL) { crm_trace("No operation to execute"); return FALSE; } op->synchronous = true; if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) { /* Synchronous meta-data operations are handled specially. Since most * resource classes don't provide any meta-data, it has to be * synthesized from available information about the agent. * * services_action_async() doesn't treat meta-data actions specially, so * it will result in an error for classes that don't support the action. 
*/ rc = (execute_metadata_action(op) == pcmk_rc_ok); } else { rc = (execute_action(op) == pcmk_rc_ok); } crm_trace(" > " PCMK__OP_FMT ": %s = %d", op->rsc, op->action, op->interval_ms, op->opaque->exec, op->rc); if (op->stdout_data) { crm_trace(" > stdout: %s", op->stdout_data); } if (op->stderr_data) { crm_trace(" > stderr: %s", op->stderr_data); } return rc; } GList * get_directory_list(const char *root, gboolean files, gboolean executable) { return services_os_get_directory_list(root, files, executable); } GList * resources_list_standards(void) { GList *standards = NULL; standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_OCF)); #if PCMK__ENABLE_SERVICE standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SERVICE)); #endif #if PCMK__ENABLE_LSB standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_LSB)); #endif #if SUPPORT_SYSTEMD { GList *agents = systemd_unit_listall(); if (agents != NULL) { standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SYSTEMD)); g_list_free_full(agents, free); } } #endif -#if SUPPORT_UPSTART - { - GList *agents = upstart_job_listall(); - - if (agents != NULL) { - standards = g_list_append(standards, - strdup(PCMK_RESOURCE_CLASS_UPSTART)); - g_list_free_full(agents, free); - } - } -#endif - #if SUPPORT_NAGIOS { GList *agents = services__list_nagios_agents(); if (agents != NULL) { standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_NAGIOS)); g_list_free_full(agents, free); } } #endif return standards; } GList * resources_list_providers(const char *standard) { if (pcmk_is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_provider)) { return resources_os_list_ocf_providers(); } return NULL; } GList * resources_list_agents(const char *standard, const char *provider) { if ((standard == NULL) #if PCMK__ENABLE_SERVICE || (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0) #endif ) { GList *tmp1; GList *tmp2; GList *result = NULL; if (standard == NULL) { tmp1 = result; tmp2 = resources_os_list_ocf_agents(NULL); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } #if PCMK__ENABLE_LSB result = g_list_concat(result, services__list_lsb_agents()); #endif #if SUPPORT_SYSTEMD tmp1 = result; tmp2 = systemd_unit_listall(); if (tmp2) { result = g_list_concat(tmp1, tmp2); } #endif -#if SUPPORT_UPSTART - tmp1 = result; - tmp2 = upstart_job_listall(); - if (tmp2) { - result = g_list_concat(tmp1, tmp2); - } -#endif - return result; } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_OCF) == 0) { return resources_os_list_ocf_agents(provider); #if PCMK__ENABLE_LSB } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_LSB) == 0) { return services__list_lsb_agents(); #endif #if SUPPORT_SYSTEMD } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { return systemd_unit_listall(); #endif -#if SUPPORT_UPSTART - } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_UPSTART) == 0) { - return upstart_job_listall(); -#endif #if SUPPORT_NAGIOS } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_NAGIOS) == 0) { return services__list_nagios_agents(); #endif } return NULL; } gboolean resources_agent_exists(const char *standard, const char *provider, const char *agent) { GList *standards = NULL; GList *providers = NULL; GList *iter = NULL; gboolean rc = FALSE; gboolean has_providers = FALSE; standards = resources_list_standards(); for (iter = standards; iter != NULL; iter = iter->next) { if (pcmk__str_eq(iter->data, standard, pcmk__str_none)) { rc = TRUE; break; } } if (rc == FALSE) { goto done; } rc = FALSE; has_providers = 
pcmk_is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_provider); if (has_providers == TRUE && provider != NULL) { providers = resources_list_providers(standard); for (iter = providers; iter != NULL; iter = iter->next) { if (pcmk__str_eq(iter->data, provider, pcmk__str_none)) { rc = TRUE; break; } } } else if (has_providers == FALSE && provider == NULL) { rc = TRUE; } if (rc == FALSE) { goto done; } #if PCMK__ENABLE_SERVICE if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) { #if PCMK__ENABLE_LSB if (services__lsb_agent_exists(agent)) { rc = TRUE; goto done; } #endif #if SUPPORT_SYSTEMD if (systemd_unit_exists(agent)) { rc = TRUE; goto done; } -#endif -#if SUPPORT_UPSTART - if (upstart_job_exists(agent)) { - rc = TRUE; - goto done; - } #endif rc = FALSE; goto done; } #endif if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) { rc = services__ocf_agent_exists(provider, agent); #if PCMK__ENABLE_LSB } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) { rc = services__lsb_agent_exists(agent); #endif #if SUPPORT_SYSTEMD } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { rc = systemd_unit_exists(agent); #endif -#if SUPPORT_UPSTART - } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_casei)) { - rc = upstart_job_exists(agent); -#endif - #if SUPPORT_NAGIOS } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) { rc = services__nagios_agent_exists(agent); #endif } else { rc = FALSE; } done: g_list_free(standards); g_list_free(providers); return rc; } /*! * \internal * \brief Set the result of an action * * \param[out] action Where to set action result * \param[in] agent_status Exit status to set * \param[in] exec_status Execution status to set * \param[in] reason Human-friendly description of event to set */ void services__set_result(svc_action_t *action, int agent_status, enum pcmk_exec_status exec_status, const char *reason) { if (action == NULL) { return; } action->rc = agent_status; action->status = exec_status; if (!pcmk__str_eq(action->opaque->exit_reason, reason, pcmk__str_none)) { free(action->opaque->exit_reason); action->opaque->exit_reason = (reason == NULL)? NULL : strdup(reason); } } /*! * \internal * \brief Set the result of an action, with a formatted exit reason * * \param[out] action Where to set action result * \param[in] agent_status Exit status to set * \param[in] exec_status Execution status to set * \param[in] format printf-style format for a human-friendly * description of reason for result * \param[in] ... arguments for \p format */ void services__format_result(svc_action_t *action, int agent_status, enum pcmk_exec_status exec_status, const char *format, ...) { va_list ap; int len = 0; char *reason = NULL; if (action == NULL) { return; } action->rc = agent_status; action->status = exec_status; if (format != NULL) { va_start(ap, format); len = vasprintf(&reason, format, ap); CRM_ASSERT(len > 0); va_end(ap); } free(action->opaque->exit_reason); action->opaque->exit_reason = reason; } /*! * \internal * \brief Set the result of an action to cancelled * * \param[out] action Where to set action result * * \note This sets execution status but leaves the exit status unchanged */ void services__set_cancelled(svc_action_t *action) { if (action != NULL) { action->status = PCMK_EXEC_CANCELLED; free(action->opaque->exit_reason); action->opaque->exit_reason = NULL; } } /*! 
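A standalone sketch of the result-setting pattern behind services__set_result() and services__format_result() above: a numeric agent status, an execution status, and a heap-owned, optionally printf-formatted exit reason that is replaced on each update. The struct and helper names are stand-ins for illustration, not the real svc_action_t internals; vasprintf() is the same call used above.

#define _GNU_SOURCE   /* for vasprintf() */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int rc;            /* agent exit status */
    int exec_status;   /* execution status */
    char *exit_reason; /* heap-allocated, owned by the struct */
} sketch_result_t;

static void
set_result_sketch(sketch_result_t *r, int rc, int exec_status,
                  const char *format, ...)
{
    char *reason = NULL;

    if (format != NULL) {
        va_list ap;

        va_start(ap, format);
        if (vasprintf(&reason, format, ap) < 0) {
            reason = NULL; /* continue without a reason on allocation failure */
        }
        va_end(ap);
    }

    r->rc = rc;
    r->exec_status = exec_status;
    free(r->exit_reason);   /* drop any previous reason */
    r->exit_reason = reason;
}

int
main(void)
{
    sketch_result_t result = { 0, 0, NULL };

    set_result_sketch(&result, 1, 2, "Agent %s not found", "foo");
    printf("rc=%d status=%d reason=%s\n", result.rc, result.exec_status,
           (result.exit_reason != NULL)? result.exit_reason : "(none)");
    free(result.exit_reason);
    return 0;
}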
* \internal * \brief Get a readable description of what an action is for * * \param[in] action Action to check * * \return Readable name for the kind of \p action */ const char * services__action_kind(const svc_action_t *action) { if ((action == NULL) || (action->standard == NULL)) { return "Process"; } else if (pcmk__str_eq(action->standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) { return "Fence agent"; } else if (pcmk__str_eq(action->standard, PCMK_RESOURCE_CLASS_ALERT, pcmk__str_none)) { return "Alert agent"; } else { return "Resource agent"; } } /*! * \internal * \brief Get the exit reason of an action * * \param[in] action Action to check * * \return Action's exit reason (or NULL if none) */ const char * services__exit_reason(const svc_action_t *action) { return action->opaque->exit_reason; } /*! * \internal * \brief Steal stdout from an action * * \param[in,out] action Action whose stdout is desired * * \return Action's stdout (which may be NULL) * \note Upon return, \p action will no longer track the output, so it is the * caller's responsibility to free the return value. */ char * services__grab_stdout(svc_action_t *action) { char *output = action->stdout_data; action->stdout_data = NULL; return output; } /*! * \internal * \brief Steal stderr from an action * * \param[in,out] action Action whose stderr is desired * * \return Action's stderr (which may be NULL) * \note Upon return, \p action will no longer track the output, so it is the * caller's responsibility to free the return value. */ char * services__grab_stderr(svc_action_t *action) { char *output = action->stderr_data; action->stderr_data = NULL; return output; } diff --git a/lib/services/upstart.c b/lib/services/upstart.c deleted file mode 100644 index b6f58f44f7..0000000000 --- a/lib/services/upstart.c +++ /dev/null @@ -1,715 +0,0 @@ -/* - * Original copyright 2010 Senko Rasic - * and Ante Karamatic - * Later changes copyright 2012-2024 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#include - -#include - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#define BUS_NAME "com.ubuntu.Upstart" -#define BUS_PATH "/com/ubuntu/Upstart" - -#define UPSTART_06_API BUS_NAME"0_6" -#define UPSTART_JOB_IFACE UPSTART_06_API".Job" -#define BUS_PROPERTY_IFACE "org.freedesktop.DBus.Properties" - -/* - http://upstart.ubuntu.com/wiki/DBusInterface -*/ -static DBusConnection *upstart_proxy = NULL; - -/*! - * \internal - * \brief Prepare an Upstart action - * - * \param[in,out] op Action to prepare - * - * \return Standard Pacemaker return code - */ -int -services__upstart_prepare(svc_action_t *op) -{ - op->opaque->exec = strdup("upstart-dbus"); - if (op->opaque->exec == NULL) { - return ENOMEM; - } - return pcmk_rc_ok; -} - -/*! 
- * \internal - * \brief Map a Upstart result to a standard OCF result - * - * \param[in] exit_status Upstart result - * - * \return Standard OCF result - */ -enum ocf_exitcode -services__upstart2ocf(int exit_status) -{ - // This library uses OCF codes for Upstart actions - return (enum ocf_exitcode) exit_status; -} - -static gboolean -upstart_init(void) -{ - static int need_init = 1; - - if (need_init) { - need_init = 0; - upstart_proxy = pcmk_dbus_connect(); - } - if (upstart_proxy == NULL) { - return FALSE; - } - return TRUE; -} - -void -upstart_cleanup(void) -{ - if (upstart_proxy) { - pcmk_dbus_disconnect(upstart_proxy); - upstart_proxy = NULL; - } -} - -/*! - * \internal - * \brief Get the DBus object path corresponding to a job name - * - * \param[in] arg_name Name of job to get path for - * \param[out] path If not NULL, where to store DBus object path - * \param[in] timeout Give up after this many seconds - * - * \return true if object path was found, false otherwise - * \note The caller is responsible for freeing *path if it is non-NULL. - */ -static bool -object_path_for_job(const gchar *arg_name, char **path, int timeout) -{ - /* - com.ubuntu.Upstart0_6.GetJobByName (in String name, out ObjectPath job) - */ - DBusError error; - DBusMessage *msg; - DBusMessage *reply = NULL; - bool rc = false; - - if (path != NULL) { - *path = NULL; - } - - if (!upstart_init()) { - return false; - } - msg = dbus_message_new_method_call(BUS_NAME, // target for the method call - BUS_PATH, // object to call on - UPSTART_06_API, // interface to call on - "GetJobByName"); // method name - - dbus_error_init(&error); - CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &arg_name, - DBUS_TYPE_INVALID)); - reply = pcmk_dbus_send_recv(msg, upstart_proxy, &error, timeout); - dbus_message_unref(msg); - - if (dbus_error_is_set(&error)) { - crm_err("Could not get DBus object path for %s: %s", - arg_name, error.message); - dbus_error_free(&error); - - } else if (!pcmk_dbus_type_check(reply, NULL, DBUS_TYPE_OBJECT_PATH, - __func__, __LINE__)) { - crm_err("Could not get DBus object path for %s: Invalid return type", - arg_name); - - } else { - if (path != NULL) { - dbus_message_get_args(reply, NULL, DBUS_TYPE_OBJECT_PATH, path, - DBUS_TYPE_INVALID); - if (*path != NULL) { - *path = strdup(*path); - } - } - rc = true; - } - - if (reply != NULL) { - dbus_message_unref(reply); - } - return rc; -} - -static void -fix(char *input, const char *search, char replace) -{ - char *match = NULL; - int shuffle = strlen(search) - 1; - - while (TRUE) { - int len, lpc; - - match = strstr(input, search); - if (match == NULL) { - break; - } - crm_trace("Found: %s", match); - match[0] = replace; - len = strlen(match) - shuffle; - for (lpc = 1; lpc <= len; lpc++) { - match[lpc] = match[lpc + shuffle]; - } - } -} - -static char * -fix_upstart_name(const char *input) -{ - char *output = strdup(input); - - fix(output, "_2b", '+'); - fix(output, "_2c", ','); - fix(output, "_2d", '-'); - fix(output, "_2e", '.'); - fix(output, "_40", '@'); - fix(output, "_5f", '_'); - return output; -} - -GList * -upstart_job_listall(void) -{ - GList *units = NULL; - DBusMessageIter args; - DBusMessageIter unit; - DBusMessage *msg = NULL; - DBusMessage *reply = NULL; - const char *method = "GetAllJobs"; - DBusError error; - int lpc = 0; - - if (upstart_init() == FALSE) { - return NULL; - } - -/* - com.ubuntu.Upstart0_6.GetAllJobs (out jobs) -*/ - - dbus_error_init(&error); - msg = dbus_message_new_method_call(BUS_NAME, // target for the method 
call - BUS_PATH, // object to call on - UPSTART_06_API, // interface to call on - method); // method name - CRM_ASSERT(msg != NULL); - - reply = pcmk_dbus_send_recv(msg, upstart_proxy, &error, DBUS_TIMEOUT_USE_DEFAULT); - dbus_message_unref(msg); - - if (dbus_error_is_set(&error)) { - crm_err("Call to %s failed: %s", method, error.message); - dbus_error_free(&error); - return NULL; - - } else if (!dbus_message_iter_init(reply, &args)) { - crm_err("Call to %s failed: Message has no arguments", method); - dbus_message_unref(reply); - return NULL; - } - - if(!pcmk_dbus_type_check(reply, &args, DBUS_TYPE_ARRAY, __func__, __LINE__)) { - crm_err("Call to %s failed: Message has invalid arguments", method); - dbus_message_unref(reply); - return NULL; - } - - dbus_message_iter_recurse(&args, &unit); - while (dbus_message_iter_get_arg_type (&unit) != DBUS_TYPE_INVALID) { - DBusBasicValue value; - const char *job = NULL; - char *path = NULL; - - if(!pcmk_dbus_type_check(reply, &unit, DBUS_TYPE_OBJECT_PATH, __func__, __LINE__)) { - crm_warn("Skipping Upstart reply argument with unexpected type"); - continue; - } - - dbus_message_iter_get_basic(&unit, &value); - - if(value.str) { - int llpc = 0; - path = value.str; - job = value.str; - while (path[llpc] != 0) { - if (path[llpc] == '/') { - job = path + llpc + 1; - } - llpc++; - } - lpc++; - crm_trace("%s -> %s", path, job); - units = g_list_append(units, fix_upstart_name(job)); - } - dbus_message_iter_next (&unit); - } - - dbus_message_unref(reply); - crm_trace("Found %d upstart jobs", lpc); - return units; -} - -gboolean -upstart_job_exists(const char *name) -{ - return object_path_for_job(name, NULL, DBUS_TIMEOUT_USE_DEFAULT); -} - -static char * -get_first_instance(const gchar * job, int timeout) -{ - char *instance = NULL; - const char *method = "GetAllInstances"; - DBusError error; - DBusMessage *msg; - DBusMessage *reply; - DBusMessageIter args; - DBusMessageIter unit; - - dbus_error_init(&error); - msg = dbus_message_new_method_call(BUS_NAME, // target for the method call - job, // object to call on - UPSTART_JOB_IFACE, // interface to call on - method); // method name - CRM_ASSERT(msg != NULL); - - dbus_message_append_args(msg, DBUS_TYPE_INVALID); - reply = pcmk_dbus_send_recv(msg, upstart_proxy, &error, timeout); - dbus_message_unref(msg); - - if (dbus_error_is_set(&error)) { - crm_info("Call to %s failed: %s", method, error.message); - dbus_error_free(&error); - goto done; - - } else if(reply == NULL) { - crm_info("Call to %s failed: no reply", method); - goto done; - - } else if (!dbus_message_iter_init(reply, &args)) { - crm_info("Call to %s failed: Message has no arguments", method); - goto done; - } - - if(!pcmk_dbus_type_check(reply, &args, DBUS_TYPE_ARRAY, __func__, __LINE__)) { - crm_info("Call to %s failed: Message has invalid arguments", method); - goto done; - } - - dbus_message_iter_recurse(&args, &unit); - if(pcmk_dbus_type_check(reply, &unit, DBUS_TYPE_OBJECT_PATH, __func__, __LINE__)) { - DBusBasicValue value; - - dbus_message_iter_get_basic(&unit, &value); - - if(value.str) { - instance = strdup(value.str); - crm_trace("Result: %s", instance); - } - } - - done: - if(reply) { - dbus_message_unref(reply); - } - return instance; -} - -/*! 
- * \internal - * \brief Parse result of Upstart status check - * - * \param[in] name DBus interface name for property that was checked - * \param[in] state Property value - * \param[in,out] userdata Status action that check was done for - */ -static void -parse_status_result(const char *name, const char *state, void *userdata) -{ - svc_action_t *op = userdata; - - if (pcmk__str_eq(state, "running", pcmk__str_none)) { - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - } else { - services__set_result(op, PCMK_OCF_NOT_RUNNING, PCMK_EXEC_DONE, state); - } - - if (!(op->synchronous)) { - services_set_op_pending(op, NULL); - services__finalize_async_op(op); - } -} - -// @TODO Use XML string constants and maybe a real XML object -#define METADATA_FORMAT \ - "\n" \ - "<" PCMK_XE_RESOURCE_AGENT " " \ - PCMK_XA_NAME "=\"%s\" " \ - PCMK_XA_VERSION "=\"" PCMK_DEFAULT_AGENT_VERSION "\">\n" \ - " <" PCMK_XE_VERSION ">1.1\n" \ - " <" PCMK_XE_LONGDESC " " PCMK_XA_LANG "=\"" PCMK__VALUE_EN "\">\n" \ - " Upstart agent for controlling the system %s service\n" \ - " \n" \ - " <" PCMK_XE_SHORTDESC " " PCMK_XA_LANG "=\"" PCMK__VALUE_EN "\">" \ - "Upstart job for %s" \ - "\n" \ - " <" PCMK_XE_PARAMETERS "/>\n" \ - " <" PCMK_XE_ACTIONS ">\n" \ - " <" PCMK_XE_ACTION " " PCMK_XA_NAME "=\"" PCMK_ACTION_START "\"" \ - " " PCMK_META_TIMEOUT "=\"15s\" />\n" \ - " <" PCMK_XE_ACTION " " PCMK_XA_NAME "=\"" PCMK_ACTION_STOP "\"" \ - " " PCMK_META_TIMEOUT "=\"15s\" />\n" \ - " <" PCMK_XE_ACTION " " PCMK_XA_NAME "=\"" PCMK_ACTION_STATUS "\"" \ - " " PCMK_META_TIMEOUT "=\"15s\" />\n" \ - " <" PCMK_XE_ACTION " " PCMK_XA_NAME "=\"restart\"" \ - " " PCMK_META_TIMEOUT "=\"15s\" />\n" \ - " <" PCMK_XE_ACTION " " PCMK_XA_NAME "=\"" PCMK_ACTION_MONITOR "\"" \ - " " PCMK_META_TIMEOUT "=\"15s\"" \ - " " PCMK_META_INTERVAL "=\"15s\"" \ - " " PCMK_META_START_DELAY "=\"15s\" />\n" \ - " <" PCMK_XE_ACTION " " PCMK_XA_NAME "=\"" PCMK_ACTION_META_DATA "\"" \ - " " PCMK_META_TIMEOUT "=\"5s\" />\n" \ - " \n" \ - " <" PCMK_XE_SPECIAL " " PCMK_XA_TAG "=\"upstart\"/>\n" \ - "\n" - -static char * -upstart_job_metadata(const char *name) -{ - return crm_strdup_printf(METADATA_FORMAT, name, name, name); -} - -/*! 
- * \internal - * \brief Set an action result based on a method error - * - * \param[in,out] op Action to set result for - * \param[in] error Method error - */ -static void -set_result_from_method_error(svc_action_t *op, const DBusError *error) -{ - services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, - "Unable to invoke Upstart DBus method"); - - if (strstr(error->name, UPSTART_06_API ".Error.UnknownInstance")) { - - if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) { - crm_trace("Masking stop failure (%s) for %s " - "because unknown service can be considered stopped", - error->name, pcmk__s(op->rsc, "unknown resource")); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - return; - } - - services__set_result(op, PCMK_OCF_NOT_INSTALLED, - PCMK_EXEC_NOT_INSTALLED, "Upstart job not found"); - - } else if (pcmk__str_eq(op->action, PCMK_ACTION_START, pcmk__str_casei) - && strstr(error->name, UPSTART_06_API ".Error.AlreadyStarted")) { - crm_trace("Masking start failure (%s) for %s " - "because already started resource is OK", - error->name, pcmk__s(op->rsc, "unknown resource")); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - return; - } - - crm_info("DBus request for %s of Upstart job %s for resource %s failed: %s", - op->action, op->agent, pcmk__s(op->rsc, "with unknown name"), - error->message); -} - -/*! - * \internal - * \brief Process the completion of an asynchronous job start, stop, or restart - * - * \param[in,out] pending If not NULL, DBus call associated with request - * \param[in,out] user_data Action that was executed - */ -static void -job_method_complete(DBusPendingCall *pending, void *user_data) -{ - DBusError error; - DBusMessage *reply = NULL; - svc_action_t *op = user_data; - - // Grab the reply - if (pending != NULL) { - reply = dbus_pending_call_steal_reply(pending); - } - - // Determine result - dbus_error_init(&error); - if (pcmk_dbus_find_error(pending, reply, &error)) { - set_result_from_method_error(op, &error); - dbus_error_free(&error); - - } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) { - // Call has no return value - crm_debug("DBus request for stop of %s succeeded", - pcmk__s(op->rsc, "unknown resource")); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - - } else if (!pcmk_dbus_type_check(reply, NULL, DBUS_TYPE_OBJECT_PATH, - __func__, __LINE__)) { - crm_info("DBus request for %s of %s succeeded but " - "return type was unexpected", op->action, - pcmk__s(op->rsc, "unknown resource")); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - - } else { - const char *path = NULL; - - dbus_message_get_args(reply, NULL, DBUS_TYPE_OBJECT_PATH, &path, - DBUS_TYPE_INVALID); - crm_debug("DBus request for %s of %s using %s succeeded", - op->action, pcmk__s(op->rsc, "unknown resource"), path); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - } - - // The call is no longer pending - CRM_LOG_ASSERT(pending == op->opaque->pending); - services_set_op_pending(op, NULL); - - // Finalize action - services__finalize_async_op(op); - if (reply != NULL) { - dbus_message_unref(reply); - } -} - -/*! 
- * \internal - * \brief Execute an Upstart action - * - * \param[in,out] op Action to execute - * - * \return Standard Pacemaker return code - * \retval EBUSY Recurring operation could not be initiated - * \retval pcmk_rc_error Synchronous action failed - * \retval pcmk_rc_ok Synchronous action succeeded, or asynchronous action - * should not be freed (because it's pending or because - * it failed to execute and was already freed) - * - * \note If the return value for an asynchronous action is not pcmk_rc_ok, the - * caller is responsible for freeing the action. - */ -int -services__execute_upstart(svc_action_t *op) -{ - char *job = NULL; - int arg_wait = TRUE; - const char *arg_env = "pacemaker=1"; - const char *action = op->action; - - DBusError error; - DBusMessage *msg = NULL; - DBusMessage *reply = NULL; - DBusMessageIter iter, array_iter; - - CRM_ASSERT(op != NULL); - - if ((op->action == NULL) || (op->agent == NULL)) { - services__set_result(op, PCMK_OCF_NOT_CONFIGURED, PCMK_EXEC_ERROR_FATAL, - "Bug in action caller"); - goto cleanup; - } - - if (!upstart_init()) { - services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, - "No DBus connection"); - goto cleanup; - } - - if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) { - op->stdout_data = upstart_job_metadata(op->agent); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - goto cleanup; - } - - if (!object_path_for_job(op->agent, &job, op->timeout)) { - if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) { - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - } else { - services__set_result(op, PCMK_OCF_NOT_INSTALLED, - PCMK_EXEC_NOT_INSTALLED, - "Upstart job not found"); - } - goto cleanup; - } - - if (job == NULL) { - // Shouldn't normally be possible -- maybe a memory error - op->rc = PCMK_OCF_UNKNOWN_ERROR; - op->status = PCMK_EXEC_ERROR; - goto cleanup; - } - - if (pcmk__strcase_any_of(op->action, PCMK_ACTION_MONITOR, - PCMK_ACTION_STATUS, NULL)) { - DBusPendingCall *pending = NULL; - char *state = NULL; - char *path = get_first_instance(job, op->timeout); - - services__set_result(op, PCMK_OCF_NOT_RUNNING, PCMK_EXEC_DONE, - "No Upstart job instances found"); - if (path == NULL) { - goto cleanup; - } - state = pcmk_dbus_get_property(upstart_proxy, BUS_NAME, path, - UPSTART_06_API ".Instance", "state", - op->synchronous? NULL : parse_status_result, - op, - op->synchronous? 
NULL : &pending, - op->timeout); - free(path); - - if (op->synchronous) { - parse_status_result("state", state, op); - free(state); - - } else if (pending == NULL) { - services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, - "Could not get job state from DBus"); - - } else { // Successfully initiated async op - free(job); - services_set_op_pending(op, pending); - services_add_inflight_op(op); - return pcmk_rc_ok; - } - - goto cleanup; - - } else if (pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) { - action = "Start"; - - } else if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) { - action = "Stop"; - - } else if (pcmk__str_eq(action, "restart", pcmk__str_none)) { - action = "Restart"; - - } else { - services__set_result(op, PCMK_OCF_UNIMPLEMENT_FEATURE, - PCMK_EXEC_ERROR_HARD, - "Action not implemented for Upstart resources"); - goto cleanup; - } - - // Initialize rc/status in case called functions don't set them - services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_DONE, - "Bug in service library"); - - crm_debug("Calling %s for %s on %s", - action, pcmk__s(op->rsc, "unknown resource"), job); - - msg = dbus_message_new_method_call(BUS_NAME, // target for the method call - job, // object to call on - UPSTART_JOB_IFACE, // interface to call on - action); // method name - CRM_ASSERT(msg != NULL); - - dbus_message_iter_init_append (msg, &iter); - CRM_LOG_ASSERT(dbus_message_iter_open_container(&iter, - DBUS_TYPE_ARRAY, - DBUS_TYPE_STRING_AS_STRING, - &array_iter)); - CRM_LOG_ASSERT(dbus_message_iter_append_basic(&array_iter, - DBUS_TYPE_STRING, &arg_env)); - CRM_LOG_ASSERT(dbus_message_iter_close_container(&iter, &array_iter)); - CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_BOOLEAN, &arg_wait, - DBUS_TYPE_INVALID)); - - if (!(op->synchronous)) { - DBusPendingCall *pending = pcmk_dbus_send(msg, upstart_proxy, - job_method_complete, op, - op->timeout); - - if (pending == NULL) { - services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, - "Unable to send DBus message"); - goto cleanup; - - } else { // Successfully initiated async op - free(job); - services_set_op_pending(op, pending); - services_add_inflight_op(op); - return pcmk_rc_ok; - } - } - - // Synchronous call - - dbus_error_init(&error); - reply = pcmk_dbus_send_recv(msg, upstart_proxy, &error, op->timeout); - - if (dbus_error_is_set(&error)) { - set_result_from_method_error(op, &error); - dbus_error_free(&error); - - } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) { - // DBus call does not return a value - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - - } else if (!pcmk_dbus_type_check(reply, NULL, DBUS_TYPE_OBJECT_PATH, - __func__, __LINE__)) { - crm_info("Call to %s passed but return type was unexpected", - op->action); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - - } else { - const char *path = NULL; - - dbus_message_get_args(reply, NULL, DBUS_TYPE_OBJECT_PATH, &path, - DBUS_TYPE_INVALID); - crm_debug("Call to %s passed: %s", op->action, path); - services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); - } - -cleanup: - free(job); - if (msg != NULL) { - dbus_message_unref(msg); - } - if (reply != NULL) { - dbus_message_unref(reply); - } - - if (op->synchronous) { - return (op->rc == PCMK_OCF_OK)? 
pcmk_rc_ok : pcmk_rc_error;
-    } else {
-        return services__finalize_async_op(op);
-    }
-}
diff --git a/lib/services/upstart.h b/lib/services/upstart.h
deleted file mode 100644
index 75419a47b1..0000000000
--- a/lib/services/upstart.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2010 Senko Rasic
- * Copyright 2010 Ante Karamatic
- * Later changes copyright 2012-2024 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-#ifndef PCMK__SERVICES_UPSTART__H
-#define PCMK__SERVICES_UPSTART__H
-
-#include <glib.h>
-#include "crm/services.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-G_GNUC_INTERNAL GList *upstart_job_listall(void);
-
-G_GNUC_INTERNAL
-int services__upstart_prepare(svc_action_t *op);
-
-G_GNUC_INTERNAL
-enum ocf_exitcode services__upstart2ocf(int exit_status);
-
-G_GNUC_INTERNAL
-int services__execute_upstart(svc_action_t *op);
-
-G_GNUC_INTERNAL gboolean upstart_job_exists(const gchar * name);
-G_GNUC_INTERNAL void upstart_cleanup(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // PCMK__SERVICES_UPSTART__H
diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in
index 3e79fb00a1..592baa1793 100644
--- a/rpm/pacemaker.spec.in
+++ b/rpm/pacemaker.spec.in
@@ -1,957 +1,937 @@
 #
 # Copyright 2008-2024 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 # User-configurable globals and defines to control package behavior
 # (these should not test {with X} values, which are declared later)
 
 ## User and group to use for nonprivileged services
 %global uname hacluster
 %global gname haclient
 
 ## Where to install Pacemaker documentation
 %if 0%{?suse_version} > 0
 %global pcmk_docdir %{_docdir}/%{name}-%{version}
 %else
 %if 0%{?rhel} > 7
 %global pcmk_docdir %{_docdir}/%{name}-doc
 %else
 %global pcmk_docdir %{_docdir}/%{name}
 %endif
 %endif
 
 ## GitHub entity that distributes source (for ease of using a fork)
 %global github_owner ClusterLabs
 
 ## Where bug reports should be submitted
 ## Leave bug_url undefined to use ClusterLabs default, others define it here
 
 ## What to use as the OCF resource agent root directory
 %global ocf_root %{_prefix}/lib/ocf
 
 ## Upstream pacemaker version, and its package version (specversion
 ## can be incremented to build packages reliably considered "newer"
 ## than previously built packages with the same pcmkversion)
 %global pcmkversion X.Y.Z
 %global specversion 1
 
 ## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build
 %global commit HEAD
 
 ## Since git v2.11, the extent of abbreviation is autoscaled by default
 ## (used to be constant of 7), so we need to convey it for non-tags, too.
%if (0%{?fedora} >= 26) || (0%{?rhel} >= 9) %global commit_abbrev 9 %else %global commit_abbrev 7 %endif # Define conditionals so that "rpmbuild --with " and # "rpmbuild --without " can enable and disable specific features ## Add option for Linux-HA (stonith/external) fencing agent support %if 0%{?suse_version} > 0 %bcond_without linuxha %else %bcond_with linuxha %endif ## Add option for whether to support storing sensitive information outside CIB %if (0%{?fedora} && 0%{?fedora} <= 33) || (0%{?rhel} && 0%{?rhel} <= 8) %bcond_with cibsecrets %else %bcond_without cibsecrets %endif ## Add option to enable Native Language Support (experimental) %bcond_with nls ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Allow deprecated option to skip (or enable, on RHEL) documentation %if 0%{?rhel} %bcond_with doc %else %bcond_without doc %endif ## Add option to default to start-up synchronization with SBD. ## ## If enabled, SBD *MUST* be built to default similarly, otherwise data ## corruption could occur. Building both Pacemaker and SBD to default ## to synchronization improves safety, without requiring higher-level tools ## to be aware of the setting or requiring users to modify configurations ## after upgrading to versions that support synchronization. %if 0%{?rhel} && 0%{?rhel} > 8 %bcond_without sbd_sync %else %bcond_with sbd_sync %endif ## Add option to prefix package version with "0." ## (so later "official" packages will be considered updates) %bcond_with pre_release -## Add option to ship Upstart job files -%bcond_with upstart_job - ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening ## Add option to enable (or disable, on RHEL 8) links for legacy daemon names %if 0%{?rhel} && 0%{?rhel} <= 8 %bcond_without legacy_links %else %bcond_with legacy_links %endif # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Whether this is a tagged release (final or release candidate) %define tag_release %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo 1 ;; *%{rparen} echo 0 ;; esac) ## Portion of export/dist tarball name after "pacemaker-", and release version %if 0%{tag_release} %define archive_version %(c=%{commit}; echo ${c:10}) %define archive_github_url %{commit}#/%{name}-%{archive_version}.tar.gz %define pcmk_release %(c=%{commit}; case $c in *-rc[[:digit:]]*%{rparen} echo 0.%{specversion}.${c: -3} ;; *%{rparen} echo %{specversion} ;; esac) %else %if "%{commit}" == "DIST" %define archive_version %{pcmkversion} %define archive_github_url %{archive_version}#/%{name}-%{pcmkversion}.tar.gz %if %{with pre_release} %define pcmk_release 0.%{specversion} %else %define pcmk_release %{specversion} %endif %else %define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) %define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz %if %{with pre_release} %define pcmk_release 0.%{specversion}.%{archive_version}.git %else %define pcmk_release %{specversion}.%{archive_version}.git %endif %endif %endif ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and ## for that there are only few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## 
- systemd-aware rpm as manifested with the presence of particular ## macro (rpm itself will trivially always be present when building) ## - existence of /usr/lib/os-release file, which is something heavily ## propagated by systemd project ## - when not good enough, there's always a possibility to check ## particular distro-specific macros (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) %if 0%{?fedora} > 20 || 0%{?rhel} > 7 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif %if 0%{?fedora} > 22 || 0%{?rhel} > 7 %global supports_recommends 1 %endif ## Different distros name certain packages differently ## (note: corosync libraries also differ, but all provide corosync-devel) %if 0%{?suse_version} > 0 %global pkgname_bzip2_devel libbz2-devel %global pkgname_docbook_xsl docbook-xsl-stylesheets %global pkgname_gettext gettext-tools %global pkgname_shadow_utils shadow %global pkgname_procps procps %global pkgname_glue_libs libglue %global pkgname_pcmk_libs lib%{name}3 %global hacluster_id 90 %else %global pkgname_libtool_devel libtool-ltdl-devel %global pkgname_libtool_devel_arch libtool-ltdl-devel%{?_isa} %global pkgname_bzip2_devel bzip2-devel %global pkgname_docbook_xsl docbook-style-xsl %global pkgname_gettext gettext-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs %global hacluster_id 189 %endif ## Distro-specific configuration choices ### Use 2.0-style output when other distro packages don't support current output %if ( 0%{?fedora} && 0%{?fedora} <=35 ) || ( 0%{?rhel} && 0%{?rhel} <= 8 ) %global compat20 --enable-compat-2.0 %endif ### Default concurrent-fencing to true when distro prefers that %if 0%{?rhel} >= 7 %global concurrent_fencing --with-concurrent-fencing-default=true %endif ### Default resource-stickiness to 1 when distro prefers that %if 0%{?fedora} >= 35 || 0%{?rhel} >= 9 %global resource_stickiness --with-resource-stickiness-default=1 %endif # Python-related definitions ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) ## -- distro-dependent tricks or automake's fallback to be applied there %if %{defined _python_bytecompile_extra} %global _python_bytecompile_extra 0 %else ### the statement effectively means no RPM-native byte-compiling will occur at ### all, so distro-dependent tricks for Python-specific packages to be applied %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif ## Prefer Python 3 definitions explicitly, in case 2 is also available %if %{defined __python3} %global python_name python3 %global python_path %{__python3} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %if %{defined python_version} %global python_name python%(echo %{python_version} | cut -d'.' 
-f1) %define python_path %{?__python}%{!?__python:/usr/bin/%{python_name}} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} %endif %define python_site %{?python_sitelib}%{!?python_sitelib:%( %{python_name} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPL-2.0-or-later AND LGPL-2.1-or-later %else # initscript is Revised BSD License: GPL-2.0-or-later AND LGPL-2.1-or-later AND BSD-3-Clause %endif Url: https://www.clusterlabs.org/ # Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e # will download pacemaker-e91769e.tar.gz # # The ending part starting with '#' is ignored by github but necessary for # rpmbuild to know what the tar archive name is. (The downloaded file will be # named correctly only for commit IDs, not tagged releases.) # # You can use "spectool -s 0 pacemaker.spec" (rpmdevtools) to show final URL. Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{archive_github_url} Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if %{with linuxha} Requires: %{python_name}-%{name} = %{version}-%{release} %endif %if !%{defined _unitdir} Requires: %{pkgname_procps} Requires: psmisc %endif %{?systemd_requires} Requires: %{python_path} BuildRequires: %{python_name}-devel BuildRequires: %{python_name}-setuptools # Pacemaker requires a minimum libqb functionality Requires: libqb >= 1.0.1 BuildRequires: pkgconfig(libqb) >= 1.0.1 # Required basic build tools BuildRequires: autoconf BuildRequires: automake BuildRequires: coreutils BuildRequires: findutils BuildRequires: gcc BuildRequires: grep BuildRequires: libtool %if %{defined pkgname_libtool_devel} BuildRequires: %{?pkgname_libtool_devel} %endif BuildRequires: make BuildRequires: pkgconfig >= 0.28 BuildRequires: sed # Required for core functionality BuildRequires: pkgconfig(glib-2.0) >= 2.42 BuildRequires: pkgconfig(gnutls) >= 3.1.7 BuildRequires: pkgconfig(libxml-2.0) >= 2.9.2 BuildRequires: libxslt-devel BuildRequires: pkgconfig(uuid) BuildRequires: %{pkgname_bzip2_devel} # Enables optional functionality BuildRequires: pkgconfig(dbus-1) >= 1.5.12 BuildRequires: %{pkgname_docbook_xsl} BuildRequires: help2man BuildRequires: ncurses-devel BuildRequires: pam-devel BuildRequires: %{pkgname_gettext} >= 0.18 # Required for "make check" BuildRequires: libcmocka-devel >= 1.1.0 BuildRequires: %{python_name}-psutil %if %{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync >= 2.0.0 BuildRequires: corosync-devel >= 2.0.0 %if %{with linuxha} BuildRequires: %{pkgname_glue_libs}-devel %endif %if %{with doc} BuildRequires: asciidoc BuildRequires: inkscape BuildRequires: %{python_name}-sphinx %endif # Booth requires this Provides: pacemaker-ticket-support = 2.0 Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: - --with(out) : cibsecrets hardening nls pre_release profiling - linuxha upstart_job + --with(out) : cibsecrets hardening linuxha nls pre_release profiling %package cli License: GPL-2.0-or-later AND LGPL-2.1-or-later Summary: Command line tools for controlling Pacemaker clusters Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %if 0%{?supports_recommends} Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report Recommends: tar Recommends: bzip2 %endif Requires: perl-TimeDate Requires: %{pkgname_procps} Requires: psmisc Requires(post):coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{pkgname_pcmk_libs} License: GPL-2.0-or-later AND LGPL-2.1-or-later Summary: Core Pacemaker libraries Requires(pre): %{pkgname_shadow_utils} Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t Conflicts: sbd < 1.4.0 %if ( 0%{?fedora} && 0%{?fedora} <=35 ) || ( 0%{?rhel} && 0%{?rhel} <= 8 ) Conflicts: pcs >= 0.11 %else Conflicts: pcs < 0.11 %endif %description -n %{pkgname_pcmk_libs} Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs License: GPL-2.0-or-later AND LGPL-2.1-or-later Summary: Cluster Libraries used by Pacemaker Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. %package -n %{python_name}-%{name} License: LGPL-2.1-or-later Summary: Python libraries for Pacemaker Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} BuildArch: noarch %description -n %{python_name}-%{name} Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{python_name}-%{name} package contains a Python library that can be used to interface with Pacemaker. %package remote %if %{defined _unitdir} License: GPL-2.0-or-later AND LGPL-2.1-or-later %else # initscript is Revised BSD License: GPL-2.0-or-later AND LGPL-2.1-or-later AND BSD-3-Clause %endif Summary: Pacemaker remote executor daemon for non-cluster nodes Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if !%{defined _unitdir} Requires: %{pkgname_procps} %endif # -remote can be fully independent of systemd %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package -n %{pkgname_pcmk_libs}-devel License: GPL-2.0-or-later AND LGPL-2.1-or-later Summary: Pacemaker development package Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: %{pkgname_bzip2_devel}%{?_isa} Requires: corosync-devel >= 2.0.0 Requires: glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} >= 1.0.1 %if %{defined pkgname_libtool_devel_arch} Requires: %{?pkgname_libtool_devel_arch} %endif Requires: libuuid-devel%{?_isa} Requires: libxml2-devel%{?_isa} >= 2.9.2 Requires: libxslt-devel%{?_isa} %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPL-2.0-or-later AND LGPL-2.1-or-later Summary: Test framework for cluster-related technologies like Pacemaker Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: %{python_name}-%{name} = %{version}-%{release} Requires: %{pkgname_procps} Requires: psmisc Requires: %{python_name}-psutil BuildArch: noarch # systemd Python bindings are a separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker BuildArch: noarch %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas License: GPL-2.0-or-later Summary: Schemas and upgrade stylesheets for Pacemaker BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
%prep %setup -q -n %{name}-%{archive_version} %build export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will # use its own defaults otherwise; if such hardenings are completely # undesired, rpmbuild using "--without hardening" # (or "--define '_without_hardening 1'") export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %endif ./autogen.sh %{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ %{?with_legacy_links: --enable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_cibsecrets: --with-cibsecrets} \ %{?with_nls: --enable-nls} \ %{?with_sbd_sync: --with-sbd-sync-default="true"} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ %{?bug_url: --with-bug-url=%{bug_url}} \ %{?ocf_root: --with-ocfdir=%{ocf_root}} \ %{?concurrent_fencing} \ %{?resource_stickiness} \ %{?compat20} \ --disable-static \ --with-initdir=%{_initrddir} \ --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif make %{_smp_mflags} V=1 pushd python %py3_build popd %check make %{_smp_mflags} check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED } 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint [ -f .CHECKED ] && rm -f -- .CHECKED %install # skip automake-native Python byte-compilation, since RPM-native one (possibly # distro-confined to Python-specific directories, which is currently the only # relevant place, anyway) assures proper intrinsic alignment with wider system # (such as with py_byte_compile macro, which is concurrent Fedora/EL specific) make install \ DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} pushd python %py3_install popd -%if %{with upstart_job} -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init -install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf -install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf -install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf -%endif - %if %{defined _unitdir} mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif %if %{with nls} %find_lang %{name} %endif # Don't package libtool archives find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f %post %if %{defined _unitdir} %systemd_post pacemaker.service %else /sbin/chkconfig --add pacemaker || : %endif %preun %if %{defined _unitdir} %systemd_preun pacemaker.service %else /sbin/service pacemaker stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker || : fi %endif %postun %if %{defined _unitdir} %systemd_postun_with_restart pacemaker.service %endif %pre remote %if %{defined _unitdir} # Stop the service before anything is touched, and remember to restart # it as one of the last actions (compared to using systemd_postun_with_restart, # this avoids suicide when 
sbd is in use) systemctl --quiet is-active pacemaker_remote if [ $? -eq 0 ] ; then mkdir -p %{_localstatedir}/lib/rpm-state/%{name} touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote systemctl stop pacemaker_remote >/dev/null 2>&1 else rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post remote %if %{defined _unitdir} %systemd_post pacemaker_remote.service %else /sbin/chkconfig --add pacemaker_remote || : %endif %preun remote %if %{defined _unitdir} %systemd_preun pacemaker_remote.service %else /sbin/service pacemaker_remote stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker_remote || : fi %endif %postun remote %if %{defined _unitdir} # This next line is a no-op, because we stopped the service earlier, but # we leave it here because it allows us to revert to the standard behavior # in the future if desired %systemd_postun_with_restart pacemaker_remote.service # Explicitly take care of removing the flag-file(s) upon final removal if [ "$1" -eq 0 ] ; then rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %posttrans remote %if %{defined _unitdir} if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then systemctl start pacemaker_remote >/dev/null 2>&1 rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post cli %if %{defined _unitdir} %systemd_post crm_mon.service %endif if [ "$1" -eq 2 ]; then # Package upgrade, not initial install: # Move any pre-2.0 logs to new location to ensure they get rotated { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \ || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker } >/dev/null 2>/dev/null || : fi %preun cli %if %{defined _unitdir} %systemd_preun crm_mon.service %endif %postun cli %if %{defined _unitdir} %systemd_postun_with_restart crm_mon.service %endif %pre -n %{pkgname_pcmk_libs} getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id} getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname} exit 0 %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n %{pkgname_pcmk_libs} %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %postun -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %post cluster-libs -p /sbin/ldconfig %postun cluster-libs -p /sbin/ldconfig %endif %files ########################################################### %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %{_sbindir}/pacemakerd %if %{defined _unitdir} %{_unitdir}/pacemaker.service %else %{_initrddir}/pacemaker %endif %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted %{_libexecdir}/pacemaker/* %if %{with linuxha} %{_sbindir}/fence_legacy %endif %{_sbindir}/fence_watchdog %doc %{_mandir}/man7/pacemaker-based.* %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* %if %{with linuxha} %doc %{_mandir}/man8/fence_legacy.* %endif %doc %{_mandir}/man8/fence_watchdog.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_datadir}/pacemaker/alerts %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) 
%{_var}/lib/pacemaker/pengine %{ocf_root}/resource.d/pacemaker/controld %{ocf_root}/resource.d/pacemaker/remote -%if %{with upstart_job} -%config(noreplace) %{_sysconfdir}/init/pacemaker.conf -%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf -%endif - %files cli %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif -%if %{with upstart_job} -%config(noreplace) %{_sysconfdir}/init/crm_mon.conf -%endif - %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %if %{with cibsecrets} %{_sbindir}/cibsecret %endif %{_sbindir}/crm_attribute %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_master %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource %{_sbindir}/crm_rule %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %{_sbindir}/stonith_admin # "dirname" is owned by -schemas, which is a prerequisite %{_datadir}/pacemaker/report.collector %{_datadir}/pacemaker/report.common # XXX "dirname" is not owned by any prerequisite %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude %{ocf_root}/resource.d/pacemaker/controld %exclude %{ocf_root}/resource.d/pacemaker/remote %dir %{ocf_root} %dir %{ocf_root}/resource.d %{ocf_root}/resource.d/pacemaker %doc %{_mandir}/man7/*pacemaker* %exclude %{_mandir}/man7/pacemaker-based.* %exclude %{_mandir}/man7/pacemaker-controld.* %exclude %{_mandir}/man7/pacemaker-schedulerd.* %exclude %{_mandir}/man7/pacemaker-fenced.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm*.8.gz %doc %{_mandir}/man8/attrd_updater.* %doc %{_mandir}/man8/cibadmin.* %if %{with cibsecrets} %doc %{_mandir}/man8/cibsecret.* %endif %doc %{_mandir}/man8/iso8601.* %doc %{_mandir}/man8/stonith_admin.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles %files -n %{pkgname_pcmk_libs} %{?with_nls:-f %{name}.lang} %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpacemaker.so.* %{_libdir}/libstonithd.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files cluster-libs %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files -n %{python_name}-%{name} %{python3_sitelib}/pacemaker/ %{python3_sitelib}/pacemaker-*.egg-info %exclude %{python3_sitelib}/pacemaker/_cts/ %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files remote %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} # state directory is shared between the subpackets # let rpm take care of removing it once it isn't # referenced anymore and empty %ghost %dir %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker-remoted %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files doc %doc %{pcmk_docdir} %license licenses/CC-BY-SA-4.0 %files cts 
%{python3_sitelib}/pacemaker/_cts/ %{_datadir}/pacemaker/tests %{_libexecdir}/pacemaker/cts-support %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files -n %{pkgname_pcmk_libs}-devel %{_includedir}/pacemaker %{_libdir}/libcib.so %{_libdir}/liblrmd.so %{_libdir}/libcrmservice.so %{_libdir}/libcrmcommon.so %{_libdir}/libpe_status.so %{_libdir}/libpe_rules.so %{_libdir}/libpacemaker.so %{_libdir}/libstonithd.so %{_libdir}/libcrmcluster.so %{_libdir}/pkgconfig/*pacemaker*.pc %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files schemas %license licenses/GPLv2 %dir %{_datadir}/pacemaker %{_datadir}/pacemaker/*.rng %{_datadir}/pacemaker/*.xsl %{_datadir}/pacemaker/api %{_datadir}/pacemaker/base %{_datadir}/pkgconfig/pacemaker-schemas.pc %changelog * PACKAGE_DATE ClusterLabs PACKAGE_VERSION - See included ChangeLog file for details diff --git a/tools/crm_mon.upstart.in b/tools/crm_mon.upstart.in deleted file mode 100644 index eb4c956c64..0000000000 --- a/tools/crm_mon.upstart.in +++ /dev/null @@ -1,35 +0,0 @@ -# crm_mon - Daemon for pacemaker monitor -# -# - -kill timeout 3600 -respawn -respawn limit 10 3600 - -expect fork - -env prog=crm_mon -env sysconf=@CONFIGDIR@/crm_mon -env rpm_lockdir=@localstatedir@/lock/subsys -env deb_lockdir=@localstatedir@/lock - - -script - [ -f "$sysconf" ] && . "$sysconf" - exec $prog $OPTIONS -end script - -post-start script - [ -f "$sysconf" ] && . "$sysconf" - [ -z "$LOCK_FILE" -a -d "$rpm_lockdir" ] && LOCK_FILE="$rpm_lockdir/$prog" - [ -z "$LOCK_FILE" -a -d "$deb_lockdir" ] && LOCK_FILE="$deb_lockdir/$prog" - touch "$LOCK_FILE" -end script - -post-stop script - [ -f "$sysconf" ] && . "$sysconf" - [ -z "$LOCK_FILE" -a -d "$rpm_lockdir" ] && LOCK_FILE="$rpm_lockdir/$prog" - [ -z "$LOCK_FILE" -a -d "$deb_lockdir" ] && LOCK_FILE="$deb_lockdir/$prog" - rm -f "$LOCK_FILE" -end script -
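
The hunks above remove the only code through which Pacemaker spoke the Upstart 0.6 D-Bus API, so upstart-class resources can no longer be resolved or controlled by the cluster after this change. For readers who still need the first step that the deleted object_path_for_job() logic performed, resolving a job name to its D-Bus object path, the sketch below shows the same call pattern with plain libdbus. It is illustrative only: the bus name "com.ubuntu.Upstart", object path "/com/ubuntu/Upstart", interface "com.ubuntu.Upstart0_6", and method "GetJobByName" are assumptions taken from the Upstart 0.6 D-Bus API rather than from this patch (the removed BUS_NAME and UPSTART_06_API constants are presumably defined in an earlier hunk of upstart.c), and error handling is kept minimal.

    /* Hedged sketch, not part of the patch: resolve an Upstart job name to its
     * D-Bus object path, roughly the first step the removed upstart.c code took
     * before starting, stopping, or monitoring a job.
     *
     * Build (assumption): gcc upstart_path.c $(pkg-config --cflags --libs dbus-1)
     */
    #include <stdio.h>
    #include <dbus/dbus.h>

    int
    main(int argc, char **argv)
    {
        const char *job = (argc > 1)? argv[1] : "ssh";
        const char *path = NULL;
        DBusError err;
        DBusConnection *bus = NULL;
        DBusMessage *msg = NULL;
        DBusMessage *reply = NULL;
        int rc = 1;

        dbus_error_init(&err);

        // Upstart normally listens on the system bus
        bus = dbus_bus_get(DBUS_BUS_SYSTEM, &err);
        if (bus == NULL) {
            fprintf(stderr, "Could not connect to system bus: %s\n", err.message);
            goto done;
        }

        // com.ubuntu.Upstart0_6.GetJobByName(name) returns the job's object path
        msg = dbus_message_new_method_call("com.ubuntu.Upstart",
                                           "/com/ubuntu/Upstart",
                                           "com.ubuntu.Upstart0_6",
                                           "GetJobByName");
        if (msg == NULL) {
            fprintf(stderr, "Could not allocate D-Bus message\n");
            goto done;
        }
        dbus_message_append_args(msg, DBUS_TYPE_STRING, &job, DBUS_TYPE_INVALID);

        /* Synchronous call with a 5s timeout; the removed code could also issue
         * the call asynchronously and finish in a completion callback */
        reply = dbus_connection_send_with_reply_and_block(bus, msg, 5000, &err);
        if (reply == NULL) {
            fprintf(stderr, "GetJobByName(%s) failed: %s\n", job, err.message);
            goto done;
        }

        if (dbus_message_get_args(reply, &err, DBUS_TYPE_OBJECT_PATH, &path,
                                  DBUS_TYPE_INVALID)) {
            printf("Upstart job '%s' is at %s\n", job, path);
            rc = 0;
        } else {
            fprintf(stderr, "Unexpected reply type: %s\n", err.message);
        }

    done:
        if (msg != NULL) {
            dbus_message_unref(msg);
        }
        if (reply != NULL) {
            dbus_message_unref(reply);
        }
        if (dbus_error_is_set(&err)) {
            dbus_error_free(&err);
        }
        return rc;
    }

Reading a job's state, which is what the removed monitor/status path did, would follow the same pattern: look up an instance on the returned job object and read its "state" property, which the deleted parse_status_result() mapped to PCMK_OCF_OK for "running" and PCMK_OCF_NOT_RUNNING for anything else.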