diff --git a/.gitignore b/.gitignore index 70e69022cc..63ce607e4e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,251 +1,250 @@ # Common \#* .\#* GPATH GRTAGS GTAGS TAGS Makefile Makefile.in .deps .dirstamp .libs *.pc *.pyc *.bz2 *.tar.gz *.tgz *.la *.lo *.o *~ *.gcda *.gcno - # Autobuild aclocal.m4 autoconf autoheader autom4te.cache/ automake build.counter -compile /confdefs.h -config.guess config.log config.status -config.sub configure /conftest* -depcomp -install-sh include/stamp-* libtool libtool.m4 ltdl.m4 libltdl -ltmain.sh -missing -py-compile /m4/argz.m4 /m4/ltargz.m4 /m4/ltoptions.m4 /m4/ltsugar.m4 /m4/ltversion.m4 /m4/lt~obsolete.m4 -test-driver ylwrap # Configure targets /cts/CTS.py /cts/CTSlab.py /cts/CTSvars.py /cts/LSBDummy /cts/OCFIPraTest.py /cts/benchmark/clubench /cts/cluster_test /cts/cts /cts/cts-cli /cts/cts-coverage /cts/cts-exec /cts/cts-fencing /cts/cts-log-watcher /cts/cts-regression /cts/cts-scheduler /cts/cts-support /cts/fence_dummy /cts/lxc_autogen.sh /cts/pacemaker-cts-dummyd /cts/pacemaker-cts-dummyd@.service /daemons/execd/pacemaker_remote /daemons/execd/pacemaker_remote.service /daemons/fenced/fence_legacy /daemons/pacemakerd/pacemaker /daemons/pacemakerd/pacemaker.combined.upstart /daemons/pacemakerd/pacemaker.service /daemons/pacemakerd/pacemaker.upstart /doc/Doxyfile /extra/logrotate/pacemaker /extra/resources/ClusterMon /extra/resources/HealthSMART /extra/resources/SysInfo /extra/resources/ifspeed /extra/resources/o2cb include/config.h include/config.h.in include/crm_config.h /tools/cibsecret /tools/crm_error /tools/crm_failcount /tools/crm_master /tools/crm_mon.service /tools/crm_mon.upstart /tools/crm_report /tools/crm_rule /tools/crm_standby /tools/pcmk_simtimes /tools/report.collector /tools/report.common # Build targets *.7 *.7.xml *.7.html *.8 *.8.xml *.8.html /daemons/attrd/pacemaker-attrd /daemons/based/pacemaker-based /daemons/based/cibmon /daemons/controld/pacemaker-controld /daemons/execd/cts-exec-helper /daemons/execd/pacemaker-execd /daemons/execd/pacemaker-remoted /daemons/fenced/cts-fence-helper /daemons/fenced/pacemaker-fenced /daemons/fenced/pacemaker-fenced.xml /daemons/pacemakerd/pacemakerd /daemons/schedulerd/pacemaker-schedulerd /daemons/schedulerd/pacemaker-schedulerd.xml /doc/.ABI-build /doc/HTML /doc/abi_dumps /doc/abi-check /doc/api/* /doc/compat_reports /doc/crm_fencing.html /doc/sphinx/*/_build /doc/sphinx/*/conf.py /doc/sphinx/shared/images/*.png /lib/common/md5.c /maint/testcc_helper.cc /maint/testcc_*_h /maint/mocked/based scratch /tools/attrd_updater /tools/cibadmin /tools/crmadmin /tools/crm_attribute /tools/crm_diff /tools/crm_mon /tools/crm_node /tools/crm_resource /tools/crm_shadow /tools/crm_simulate /tools/crm_ticket /tools/crm_verify /tools/iso8601 /tools/stonith_admin xml/crm.dtd xml/pacemaker*.rng xml/versions.rng xml/api/api-result*.rng lib/gnu/libgnu.a lib/gnu/stdalign.h *.coverity # Packager artifacts *.rpm /mock /pacemaker.spec /rpm/[A-Z]* # make dist/export working directory pacemaker-[a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9][a-f0-9] # Test detritus /cts/.regression.failed.diff /cts/scheduler/*.ref /cts/scheduler/*.up /cts/scheduler/*.up.err /cts/scheduler/bug-rh-1097457.log /cts/scheduler/bug-rh-1097457.trs /cts/scheduler/shadow.* /cts/test-suite.log /lib/*/tests/*/*.log /lib/*/tests/*/*_test /lib/*/tests/*/*.trs /xml/test-*/*.up /xml/test-*/*.up.err /xml/assets/*.rng /xml/assets/diffview.js /xml/assets/xmlcatalog # Release maintenance detritus /maint/gnulib # Formerly built files (helps when jumping 
back and forth in checkout) /.ABI-build /Doxyfile /HTML /abi_dumps /abi-check /compat_reports +/compile /attrd /cib +/config.guess +/config.sub /coverage.sh /crmd /cts/HBDummy +/depcomp /doc/*.build /doc/*/en-US/Ap-*.xml /doc/*/en-US/Ch-*.xml /doc/*/publican.cfg /doc/*/publish /doc/*/tmp/** /doc/Clusters_from_Scratch.txt /doc/Pacemaker_Explained.txt /doc/acls.html /doc/publican-catalog* /doc/shared/en-US/*.xml /doc/shared/en-US/images/pcmk-*.png /doc/shared/en-US/images/Policy-Engine-*.png /fencing +/install-sh /lib/common/tests/flags/pcmk__clear_flags_as /lib/common/tests/flags/pcmk__set_flags_as /lib/common/tests/flags/pcmk_all_flags_set /lib/common/tests/flags/pcmk_any_flags_set /lib/common/tests/operations/parse_op_key /lib/common/tests/strings/pcmk__btoa /lib/common/tests/strings/pcmk__parse_ll_range /lib/common/tests/strings/pcmk__scan_double /lib/common/tests/strings/pcmk__str_any_of /lib/common/tests/strings/pcmk__strcmp /lib/common/tests/strings/pcmk__char_in_any_str /lib/common/tests/utils/pcmk_str_is_infinity /lib/common/tests/utils/pcmk_str_is_minus_infinity /lib/pengine/tests/rules/pe_cron_range_satisfied /lrmd +/ltmain.sh /mcp +/missing /pacemaker-*.spec /pengine +/py-compile +/test-driver #Other coverity-* logs *.patch *.diff *.sed *.orig *.rej *.swp diff --git a/.tito/custom.py b/.tito/custom.py index f49f67509b..9f0efec9d9 100644 --- a/.tito/custom.py +++ b/.tito/custom.py @@ -1,63 +1,60 @@ # -*- coding: UTF-8 -*- """Tito ad-hoc module for custom git repo -> spec + archive metamorphosis""" -from __future__ import (print_function, unicode_literals, absolute_import, - division) - __author__ = "Jan Pokorný " -__copyright__ = "Copyright 2016 Red Hat, Inc." +__copyright__ = "Copyright 2016-2020 the Pacemaker project contributors" __license__ = "LGPLv2.1+ WITHOUT ANY WARRANTY" from os.path import basename, join from shutil import copyfile from tito.builder.main import BuilderBase from tito.builder.fetch import FetchBuilder from tito.common import error_out, run_command, get_spec_version_and_release class NativeFetchBuilder(FetchBuilder): """ A specialized FetchBuilder to just setup the specfile + archive using package-native scripts, which currently boils down to a sequence that needs to be configured via fetch_prep_command option in builder section (e.g., "./autogen.sh && ./configure && make dist foo.spec"). Uses code of src/tito/builder/fetch.py from the tito project as a template. 
""" REQUIRED_ARGS = [] def __init__(self, name=None, tag=None, build_dir=None, config=None, user_config=None, args=None, **kwargs): BuilderBase.__init__(self, name=name, build_dir=build_dir, config=config, user_config=user_config, args=args, **kwargs) if tag: error_out("FetchBuilder does not support building specific tags.") if not config.has_option('builder', 'fetch_prep_command'): error_out("NativeFetchBuilder requires fetch_prep_command.") self.build_tag = '%s-%s' % ( self.project_name, get_spec_version_and_release(self.start_dir, '%s.spec' % self.project_name) ) def tgz(self): self.ran_tgz = True self._create_build_dirs() print("Fetching sources...") run_command(self.config.get('builder', 'fetch_prep_command')) manual_sources = [run_command("ls -1t *.tar.* | head -n1")] self.spec_file = self.project_name + '.spec' for s in manual_sources: base_name = basename(s) dest_filepath = join(self.rpmbuild_sourcedir, base_name) copyfile(s, dest_filepath) self.sources.append(dest_filepath) diff --git a/.travis.yml b/.travis.yml index 73f62eca60..456519eaad 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,112 +1,112 @@ # Copyright 2012-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # Control file for the Travis autobuilder # https://docs.travis-ci.com/user/customizing-the-build/ language: c # We build with both gcc and clang. If MAINT_EXTRA=1 (gcc only), the # schema regression tests will additionally be run. matrix: include: - compiler: gcc env: MAINT_EXTRA=1 arch: amd64 - compiler: clang env: MAINT_EXTRA=0 arch: amd64 - compiler: gcc env: MAINT_EXTRA=0 arch: ppc64le cache: directories: - xml/.relaxng.org # sudo add-apt-repository ppa:hotot-team before_install: - if [ "$TRAVIS_ARCH" == "ppc64le" ]; then sudo add-apt-repository "deb http://ports.ubuntu.com/ubuntu-ports/ trusty main"; sudo apt-get update -qq; fi - if [ "$TRAVIS_ARCH" == "amd64" ]; then sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main"; sudo apt-get update -qq; fi # To switch to Travis-CI's containerized (non-sudo) architecture, # all our dependencies need to be on Travis's whitelist: # https://github.com/travis-ci/apt-package-whitelist # # The only ones that aren't already are: # - cluster-glue-dev: see open issue: # https://github.com/travis-ci/apt-package-whitelist/issues/2936 # - resource-agents: see open issue: # https://github.com/travis-ci/apt-package-whitelist/issues/4261 # - libdbus-1-dev: see multiple open issues: # https://github.com/travis-ci/apt-package-whitelist/issues?utf8=%E2%9C%93&q=is%3Aissue+libdbus+-1-dev # (a workaround is to install libdbus-glib-1-dev, which depends on it and is whitelisted) install: - sudo apt-get install -qq automake autoconf libtool - python python-dev + psmisc procps python3 python3-dev libbz2-dev libdbus-1-dev libglib2.0-dev libgnutls-dev libltdl-dev libncurses5-dev libpam0g-dev libxml2-dev libxslt1-dev uuid-dev libqb-dev libcfg-dev libcmap-dev libcorosync-common-dev libcpg-dev libquorum-dev libsam-dev libtotem-pg-dev libvotequorum-dev cluster-glue-dev resource-agents - test $MAINT_EXTRA -eq 0 || sudo apt-get install -qq libxml2-utils xsltproc before_script: # some tests (e.g. 
cts-exec-helper) require actual system-wide credentials - ./autogen.sh - ./configure --with-daemon-user=nobody --with-daemon-group=nogroup --libexecdir=/usr/lib/pacemaker --with-configdir=/etc/default script: # Create directories needed by commands used by regression tests - sudo make install-exec-local || true - make - make check - ./cts/cts-cli -V - ./cts/cts-scheduler -V - sudo ./cts/cts-exec -V --force-wait - test $MAINT_EXTRA -eq 0 || { { echo 'looking for presence of control characters...'; { git ls-files | grep -v tap-driver.sh | xargs grep -Ensv "^([^[:cntrl:]]*|$(printf '\t'))*$"||:; } 2>/dev/null | { ! grep -Ev '^Binary file' && echo 'ALL OK'; }; } && ( cd xml; ./regression.sh && ./regression.sh -B && ./regression.sh -S && { schemas=; for schema in *.rng; do case ${schema} in *cibtr*);; *)schemas="${schemas} ${schema}";; esac; done; test -s .relaxng.org/relaxng.rng 2>/dev/null || curl --create-dirs -SsLo .relaxng.org/relaxng.rng 'https://raw.githubusercontent.com/relaxng/relaxng.org/master/relaxng.rng'; xmllint --noout --relaxng .relaxng.org/relaxng.rng ${schemas}; } ); } #after_script: #after_success: after_failure: - lsb_release -a - sudo cat /etc/apt/sources.list - whoami - env | sort - cat include/config.h notifications: irc: "irc.freenode.org#pcmk" # email: # recipients: # - developers@clusterlabs.org # whitelist branches: only: - master - "1.1" - "2.0" diff --git a/GNUmakefile b/GNUmakefile index b930ab306a..e463e39ca2 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,450 +1,450 @@ # # Copyright 2008-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # default: build .PHONY: default # The toplevel "clean" targets are generated from Makefile.am, not this file. # We can't use autotools' CLEANFILES, clean-local, etc. here. Instead, we # define this target, which Makefile.am can use as a dependency of clean-local. EXTRA_CLEAN_TARGETS = ancillary-clean -include Makefile # The main purpose of this GNUmakefile is that its targets can be invoked # without having to call autogen.sh and configure first. That means automake # variables may or may not be defined. Here, we use the current working # directory if a relevant variable hasn't been defined. # # The idea is to keep generated artifacts in the build tree, in case a VPATH # build is in use, but in practice it would be difficult to make the targets # here usable from a different location than the source tree. abs_srcdir ?= $(shell pwd) abs_builddir ?= $(shell pwd) GLIB_CFLAGS ?= $(pkg-config --cflags glib-2.0) PACKAGE ?= pacemaker # Definitions that specify what various targets will apply to COMMIT ?= HEAD # TAG defaults to DIST when not in a git checkout (e.g. from a distribution), # the tag name if COMMIT is tagged, and the full commit ID otherwise. 
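For illustration only (not part of the patch), here is the TAG fallback chain that the GNUmakefile definition just below encodes, written as a standalone shell sketch: an exact tag match wins, otherwise the full commit ID of COMMIT, otherwise the literal DIST when run outside a git checkout.

    # Standalone rendering of the TAG default; COMMIT=HEAD as in the Makefile.
    COMMIT=HEAD
    TAG=$(T=$(git describe --tags --exact-match "$COMMIT" 2>/dev/null); \
          test -n "$T" && echo "$T" \
          || git log --pretty=format:%H -n 1 "$COMMIT" 2>/dev/null \
          || echo DIST)
    echo "TAG resolved to: $TAG"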
TAG ?= $(shell T=$$(git describe --tags --exact-match '$(COMMIT)' 2>/dev/null); \ test -n "$${T}" && echo "$${T}" \ || git log --pretty=format:%H -n 1 '$(COMMIT)' 2>/dev/null || echo DIST) lparen = ( rparen = ) # SPEC_COMMIT is identical to TAG for DIST and tagged releases, otherwise it is # the short commit ID (which must be used in order for "make export" to use the # same archive name as "make dist") SPEC_COMMIT ?= $(shell \ case $(TAG) in \ Pacemaker-*|DIST$(rparen) \ echo '$(TAG)' ;; \ *$(rparen) \ git log --pretty=format:%h -n 1 '$(TAG)';; \ esac) SPEC_ABBREV = $(shell printf %s '$(SPEC_COMMIT)' | wc -c) LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1) ifneq ($(origin VERSION), undefined) LAST_RELEASE ?= Pacemaker-$(VERSION) else LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1) endif NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}') # This Makefile can create 2 types of distributions: # # - "make dist" is automake's native functionality, based on the various # dist/nodist make variables; it always uses the current sources # # - "make export" is a custom target based on git archive and relevant entries # from .gitattributes; it defaults to current sources but can use any git tag # # Both types use the TARFILE name for the result, though they generate # different contents. # # The directory is named pacemaker-DIST when not in a git checkout (e.g. # from a distribution itself), pacemaker- for tagged # commits, and pacemaker- otherwise. distdir = $(PACKAGE)-$(shell \ case $(TAG) in \ DIST$(rparen) \ echo DIST;; \ Pacemaker-*$(rparen) \ echo '$(TAG)' | cut -c11-;; \ *$(rparen) \ git log --pretty=format:%h -n 1 '$(TAG)';; \ esac) TARFILE = $(abs_builddir)/$(distdir).tar.gz .PHONY: init init: test -e $(top_srcdir)/configure || ./autogen.sh test -e $(abs_builddir)/Makefile || $(abs_builddir)/configure .PHONY: build build: init $(MAKE) $(AM_MAKEFLAGS) core export: if [ ! -f "$(TARFILE)" ]; then \ if [ $(TAG) = dirty ]; then \ git commit -m "DO-NOT-PUSH" -a; \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" HEAD^{tree}; \ git reset --mixed HEAD^; \ else \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \ fi; \ echo "`date`: Rebuilt $(TARFILE)"; \ else \ echo "`date`: Using existing tarball: $(TARFILE)"; \ fi ## RPM-related targets # Where to put RPM artifacts; possible values: # -# - toplevel (default): RPM sources, spec, and source rpm in top-level build -# directory (everything else uses the usual defaults) -# -# - subtree: RPM sources (i.e. TARFILE) in top-level build directory, +# - subtree (default): RPM sources (i.e. 
TARFILE) in top-level build directory, # everything else in dedicated "rpm" subdirectory of build tree -RPMDEST ?= toplevel +# +# - toplevel (deprecated): RPM sources, spec, and source rpm in top-level build +# directory, everything else uses the usual rpmbuild defaults +RPMDEST ?= subtree RPM_SPEC_DIR_toplevel = $(abs_builddir) RPM_SRCRPM_DIR_toplevel = $(abs_builddir) RPM_OPTS_toplevel = --define "_sourcedir $(abs_builddir)" \ --define "_specdir $(RPM_SPEC_DIR_toplevel)" \ --define "_srcrpmdir $(RPM_SRCRPM_DIR_toplevel)" RPM_SPEC_DIR_subtree = $(abs_builddir)/rpm/SPECS RPM_SRCRPM_DIR_subtree = $(abs_builddir)/rpm/SRPMS RPM_OPTS_subtree = --define "_sourcedir $(abs_builddir)" \ --define "_topdir $(abs_builddir)/rpm" RPM_SPEC_DIR = $(RPM_SPEC_DIR_$(RPMDEST)) RPM_SRCRPM_DIR = $(RPM_SRCRPM_DIR_$(RPMDEST)) RPM_OPTS = $(RPM_OPTS_$(RPMDEST)) WITH ?= --without doc BUILD_COUNTER ?= build.counter LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER)) COUNT = $(shell expr 1 + $(LAST_COUNT)) SPECVERSION ?= $(COUNT) MOCK_DIR = $(abs_builddir)/mock MOCK_OPTIONS ?= --resultdir=$(MOCK_DIR) --no-cleanup-after F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora}) ARCH ?= $(shell test ! -e /etc/fedora-release && uname -m; test -e /etc/fedora-release && rpm --eval %{_arch}) MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH)) # rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros # # Unfortunately, at least recent versions of rpm do not support mentioned # switch. To work this around, we can emulate mechanism that rpm uses # internally: unfold the flags into respective macro definitions: # # --with[out] FOO -> --define "_with[out]_FOO --with[out]-FOO" # # $(1) ... WITH string (e.g., --with pre_release --without doc) # $(2) ... options following the initial "rpmbuild" in the command # $(3) ... final arguments determined with $2 (e.g., pacemaker.spec) # # Note that if $(3) is a specfile, extra case is taken so as to reflect # pcmkversion correctly (using in-place modification). 
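For reference, a minimal shell sketch (mirroring the rpmbuild-with recipe defined below, with example feature names) of how "--with FOO" / "--without BAR" get unfolded into the --define forms that rpmbuild actually understands:

    # Requires GNU getopt (util-linux). Feature names here are examples only.
    WITH=$(getopt -o "" -l with:,without: -- --with pre_release --without doc) || exit 1
    CMD="rpmbuild -bs"
    eval set -- "$WITH"
    while true; do
        case "$1" in
            --with)    CMD="$CMD --define \"_with_$2 --with-$2\""; shift 2 ;;
            --without) CMD="$CMD --define \"_without_$2 --without-$2\""; shift 2 ;;
            --)        shift; break ;;
            *)         echo "cannot parse WITH: $1"; exit 1 ;;
        esac
    done
    # Prints: rpmbuild -bs --define "_with_pre_release --with-pre_release"
    #                      --define "_without_doc --without-doc"
    echo "$CMD"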
# # Also note that both ways to specify long option with an argument # (i.e., what getopt and, importantly, rpm itself support) can be used: # # --with FOO # --with=FOO rpmbuild-with = \ WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \ CMD='rpmbuild $(2)'; PREREL=0; \ eval set -- "$${WITH}"; \ while true; do \ case "$$1" in \ --with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \ [ "$$2" != pre_release ] || PREREL=1; shift 2;; \ --without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \ [ "$$2" != pre_release ] || PREREL=0; shift 2;; \ --) shift ; break ;; \ *) echo "cannot parse WITH: $$1"; exit 1;; \ esac; \ done; \ case "$(3)" in \ *.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \ && sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \ || sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \ esac; \ CMD="$${CMD} $(3)"; \ eval "$${CMD}" # Depend on spec-clean so it gets rebuilt every time $(RPM_SPEC_DIR)/$(PACKAGE).spec: spec-clean rpm/pacemaker.spec.in $(AM_V_at)$(MKDIR_P) $(RPM_SPEC_DIR) # might not exist in VPATH build $(AM_V_GEN)if [ x != x"`git ls-files -m rpm/pacemaker.spec.in 2>/dev/null`" ]; then \ cat $(abs_srcdir)/rpm/pacemaker.spec.in; \ elif git cat-file -e $(TAG):rpm/pacemaker.spec.in 2>/dev/null; then \ git show $(TAG):rpm/pacemaker.spec.in; \ elif git cat-file -e $(TAG):pacemaker.spec.in 2>/dev/null; then \ git show $(TAG):pacemaker.spec.in; \ else \ cat $(abs_srcdir)/rpm/pacemaker.spec.in; \ fi | sed \ -e "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" \ -e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \ -e 's/global\ commit\ .*/global\ commit\ $(SPEC_COMMIT)/' \ -e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SPEC_ABBREV)/' \ -e "s/PACKAGE_DATE/$$(date +'%a %b %d %Y')/" \ -e "s/PACKAGE_VERSION/$$(git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)/" \ > "$@" .PHONY: $(PACKAGE).spec $(PACKAGE).spec: $(RPM_SPEC_DIR)/$(PACKAGE).spec .PHONY: spec-clean spec-clean: -rm -f $(RPM_SPEC_DIR)/$(PACKAGE).spec .PHONY: srpm srpm: export srpm-clean $(RPM_SPEC_DIR)/$(PACKAGE).spec if [ -e $(BUILD_COUNTER) ]; then \ echo $(COUNT) > $(BUILD_COUNTER); \ fi $(call rpmbuild-with,$(WITH),-bs $(RPM_OPTS),$(RPM_SPEC_DIR)/$(PACKAGE).spec) .PHONY: srpm-clean srpm-clean: -rm -f $(RPM_SRCRPM_DIR)/*.src.rpm .PHONY: chroot chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG) @echo "Done" .PHONY: mock-next mock-next: $(MAKE) $(AM_MAKEFLAGS) F=$(shell expr 1 + $(F)) mock .PHONY: mock-rawhide mock-rawhide: $(MAKE) $(AM_MAKEFLAGS) F=rawhide mock mock-install-%: @echo "Installing packages" mock --root=$* $(MOCK_OPTIONS) --install $(MOCK_DIR)/*.rpm \ vi sudo valgrind lcov gdb fence-agents psmisc .PHONY: mock-install mock-install: mock-install-$(MOCK_CFG) @echo "Done" .PHONY: mock-sh mock-sh: mock-sh-$(MOCK_CFG) @echo "Done" mock-sh-%: @echo "Connecting" mock --root=$* $(MOCK_OPTIONS) --shell @echo "Done" mock-%: srpm mock-clean mock $(MOCK_OPTIONS) --root=$* --no-cleanup-after --rebuild \ $(WITH) $(RPM_SRCRPM_DIR)/*.src.rpm .PHONY: mock mock: mock-$(MOCK_CFG) @echo "Done" .PHONY: dirty dirty: $(MAKE) $(AM_MAKEFLAGS) TAG=dirty mock .PHONY: mock-clean mock-clean: -rm -rf $(MOCK_DIR) .PHONY: rpm-dep rpm-dep: $(RPM_SPEC_DIR)/$(PACKAGE).spec sudo yum-builddep $(PACKAGE).spec # e.g. 
make WITH="--with pre_release" rpm .PHONY: rpm rpm: srpm @echo To create custom builds, edit the flags and options in $(PACKAGE).spec first $(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_SRCRPM_DIR)/*.src.rpm) .PHONY: rpmlint rpmlint: $(RPM_SPEC_DIR)/$(PACKAGE).spec rpmlint -f rpm/rpmlintrc "$<" .PHONY: release release: $(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RELEASE) rpm .PHONY: rc rc: $(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RC) rpm ## Static analysis via coverity # Aggressiveness (low, medium, or high) COVLEVEL ?= low # Generated outputs COVERITY_DIR = $(abs_builddir)/coverity-$(TAG) COVTAR = $(abs_builddir)/$(PACKAGE)-coverity-$(TAG).tgz COVEMACS = $(abs_builddir)/$(TAG).coverity COVHTML = $(COVERITY_DIR)/output/errors # Coverity outputs are phony so they get rebuilt every invocation .PHONY: $(COVERITY_DIR) $(COVERITY_DIR): init core-clean coverity-clean $(AM_V_GEN)cov-build --dir "$@" $(MAKE) $(AM_MAKEFLAGS) core # Public coverity instance .PHONY: $(COVTAR) $(COVTAR): $(COVERITY_DIR) $(AM_V_GEN)tar czf "$@" --transform="s@.*$(TAG)@cov-int@" "$<" .PHONY: coverity coverity: $(COVTAR) @echo "Now go to https://scan.coverity.com/users/sign_in and upload:" @echo " $(COVTAR)" @echo "then make core-clean coverity-clean" # Licensed coverity instance # # The prerequisites are a little hacky; rather than actually required, some # of them are designed so that things execute in the proper order (which is # not the same as GNU make's order-only prerequisites). .PHONY: coverity-analyze coverity-analyze: $(COVERITY_DIR) @echo "" @echo "Analyzing (waiting for coverity license if necessary) ..." cov-analyze --dir "$<" --wait-for-license --security \ --aggressiveness-level "$(COVLEVEL)" .PHONY: $(COVEMACS) $(COVEMACS): coverity-analyze $(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@" .PHONY: $(COVHTML) $(COVHTML): $(COVEMACS) $(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@" .PHONY: coverity-corp coverity-corp: $(COVHTML) $(MAKE) $(AM_MAKEFLAGS) core-clean @echo "Done. 
See:" @echo " file://$(COVHTML)/index.html" @echo "When no longer needed, make coverity-clean" # Remove all outputs regardless of tag .PHONY: coverity-clean coverity-clean: -rm -rf "$(abs_builddir)"/coverity-* \ "$(abs_builddir)"/$(PACKAGE)-coverity-*.tgz \ "$(abs_builddir)"/*.coverity ## Change log generation summary: @printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)" @printf "\n- Changesets: `git log --pretty=oneline --no-merges $(LAST_RELEASE)..HEAD | wc -l`" @printf "\n- Diff:\n" @git diff $(LAST_RELEASE)..HEAD --shortstat include lib daemons tools xml rc-changes: @$(MAKE) $(AM_MAKEFLAGS) NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes changes: summary @printf "\n- Features added since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf @printf "\n- Changes since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \ | grep -e High: -e Fix: -e Bug | sed \ -e 's@\(Fix\|High\|Bug\):@@' \ -e 's@\(cib\|pacemaker-based\|based\):@CIB:@' \ -e 's@\(crmd\|pacemaker-controld\|controld\):@controller:@' \ -e 's@\(lrmd\|pacemaker-execd\|execd\):@executor:@' \ -e 's@\(Fencing\|stonithd\|stonith\|pacemaker-fenced\|fenced\):@fencing:@' \ -e 's@\(PE\|pengine\|pacemaker-schedulerd\|schedulerd\):@scheduler:@' \ | sort -uf authors: git log $(LAST_RELEASE)..$(COMMIT) --format='%an' | sort -u changelog: @$(MAKE) $(AM_MAKEFLAGS) changes > ChangeLog @printf "\n">> ChangeLog git show $(LAST_RELEASE):ChangeLog >> ChangeLog DO_NOT_INDENT = lib/gnu daemons/controld/controld_fsa.h indent: find . -name "*.[ch]" -exec ./p-indent \{\} \; git co HEAD $(DO_NOT_INDENT) rel-tags: tags find . 
-name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \; CLANG_analyzer = $(shell which scan-build) CLANG_checkers = # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} # --inconclusive --std=posix CPPCHECK_ARGS ?= BASE_CPPCHECK_ARGS = -I include --max-configs=30 --library=posix --library=gnu \ --library=gtk $(GLIB_CFLAGS) -D__GNUC__ --inline-suppr -q cppcheck-all: cppcheck $(CPPCHECK_ARGS) $(BASE_CPPCHECK_ARGS) -DBUILD_PUBLIC_LIBPACEMAKER \ -DDEFAULT_CONCURRENT_FENCING_TRUE replace lib daemons tools cppcheck: cppcheck $(CPPCHECK_ARGS) $(BASE_CPPCHECK_ARGS) replace lib daemons tools clang: test -e $(CLANG_analyzer) scan-build $(CLANG_checkers:%=-enable-checker %) $(MAKE) $(AM_MAKEFLAGS) clean all # V3 = scandir unsetenv alphasort xalloc # V2 = setenv strerror strchrnul strndup # https://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import # previously, this was crypto/md5, but got spoiled with streams/kernel crypto GNU_MODS = crypto/md5-buffer # stdint appears to be surrogate only for C99-lacking environments GNU_MODS_AVOID = stdint # only for plain crypto/md5: we make do without kernel-assisted crypto # GNU_MODS_AVOID += crypto/af_alg gnulib-update: -test -e maint/gnulib \ || git clone https://git.savannah.gnu.org/git/gnulib.git maint/gnulib cd maint/gnulib && git pull maint/gnulib/gnulib-tool \ --source-base=lib/gnu --lgpl=2 --no-vc-files --no-conditional-dependencies \ $(GNU_MODS_AVOID:%=--avoid %) --import $(GNU_MODS) ancillary-clean: spec-clean srpm-clean mock-clean coverity-clean -rm -f $(TARFILE) diff --git a/INSTALL.md b/INSTALL.md index dc4fe00805..c4af9f7217 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,76 +1,79 @@ # How to Install Pacemaker ## Build Dependencies | Version | Fedora-based | Suse-based | Debian-based | |:---------------:|:------------------:|:------------------:|:--------------:| | 1.11 or later | automake | automake | automake | | 2.64 or later | autoconf | autoconf | autoconf | | | libtool | libtool | libtool | | | libtool-ltdl-devel | | libltdl-dev | | | libuuid-devel | libuuid-devel | uuid-dev | | | pkgconfig | pkgconfig | pkg-config | -| 2.16.0 or later | glib2-devel | glib2-devel | libglib2.0-dev | +| 2.32.0 or later | glib2-devel | glib2-devel | libglib2.0-dev | | | libxml2-devel | libxml2-devel | libxml2-dev | | | libxslt-devel | libxslt-devel | libxslt-dev | | | bzip2-devel | libbz2-devel | libbz2-dev | | | libqb-devel | libqb-devel | libqb-dev | +| 3.2 or later | python3 | python3 | python3 | -Also: GNU make, and Python 2.7 or Python 3.2 or later +Also: GNU make ### Cluster Stack Dependencies *Only corosync is currently supported* | Version | Fedora-based | Suse-based | Debian-based | |:---------------:|:------------------:|:------------------:|:--------------:| | 2.0.0 or later | corosynclib | libcorosync | corosync | | 2.0.0 or later | corosynclib-devel | libcorosync-devel | | | | | | libcfg-dev | | | | | libcpg-dev | | | | | libcmap-dev | | | | | libquorum-dev | ### Optional Build Dependencies | Feature Enabled | Version | Fedora-based | Suse-based | Debian-based | |:-----------------------------------------------:|:--------------:|:-----------------------:|:-----------------------:|:-----------------------:| | Pacemaker Remote and encrypted remote CIB admin | 2.1.7 or later | gnutls-devel | libgnutls-devel | libgnutls-dev | | encrypted remote CIB admin | | pam-devel | pam-devel | libpam0g-dev | | interactive crm_mon | | ncurses-devel | 
ncurses-devel | ncurses-dev | | systemd support | | systemd-devel | systemd-devel | libsystemd-dev | | systemd/upstart resource support | | dbus-devel | dbus-devel | libdbus-1-dev | | Linux-HA style fencing agents | | cluster-glue-libs-devel | libglue-devel | cluster-glue-dev | | documentation | | asciidoc or asciidoctor | asciidoc or asciidoctor | asciidoc or asciidoctor | | documentation | | help2man | help2man | help2man | | documentation | | inkscape | inkscape | inkscape | | documentation | | docbook-style-xsl | docbook-xsl-stylesheets | docbook-xsl | -| documentation | | python-sphinx or python3-sphinx | python-sphinx or python3-sphinx | python-sphinx or python3-sphinx | +| documentation | | python3-sphinx | python3-sphinx | python3-sphinx | | documentation (PDF) | | texlive, texlive-titlesec, texlive-framed, texlive-threeparttable texlive-wrapfig texlive-multirow | texlive, texlive-latex | texlive, texlive-latex-extra | +| RPM packages via "make rpm" | 4.11 or later | rpm | rpm | (n/a) | ## Optional testing dependencies +* procps and psmisc (if running cts-exec, cts-fencing, or CTS) * valgrind (if running CTS valgrind tests) -* systemd-python (if using CTS on cluster nodes running systemd) +* python3-systemd (if using CTS on cluster nodes running systemd) * rsync (if running CTS container tests) * libvirt-daemon-driver-lxc (if running CTS container tests) * libvirt-daemon-lxc (if running CTS container tests) * libvirt-login-shell (if running CTS container tests) * nmap (if not specifying an IP address base) * oprofile (if running CTS profiling tests) * dlm (to log DLM debugging info after CTS tests) ## Simple install $ make && sudo make install If GNU make is not your default make, use "gmake" instead. ## Detailed install First, browse the build options that are available: $ ./autogen.sh $ ./configure --help Re-run ./configure with any options you want, then proceed with the simple method. diff --git a/Makefile.am b/Makefile.am index 3be5949b02..8c926bb113 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,130 +1,133 @@ # # Copyright 2003-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # +# This directory must be same as in configure.ac's AC_CONFIG_MACRO_DIR +ACLOCAL_AMFLAGS = -I m4 + # m4/glibtests.m4 is copied from https://gitlab.gnome.org/GNOME/glib/blob/master/m4macros/glibtests.m4. EXTRA_DIST = CONTRIBUTING.md \ GNUmakefile \ INSTALL.md \ README.markdown \ autogen.sh \ m4/glibtests.m4 \ m4/gnulib-cache.m4 \ m4/gnulib-tool.m4 \ rpm/rpmlintrc \ rpm/pacemaker.spec.in DISTCLEANFILES = config.status MAINTAINERCLEANFILES = Makefile.in \ aclocal.m4 \ config.guess \ config.sub \ configure \ depcomp \ install-sh \ ltmain.sh \ missing \ py-compile \ test-driver # Don't try to install files outside build directory for "make distcheck". 
AM_DISTCHECK_CONFIGURE_FLAGS = --prefix="$$dc_install_base/usr" \ --sysconfdir="$$dc_install_base/etc" \ --with-initdir="$$dc_install_base/etc/init.d" \ --with-ocfdir="$$dc_install_base/usr/lib/ocf" \ --with-systemdsystemunitdir="$$dc_install_base$(systemdsystemunitdir)" # Only these will get installed with a plain "make install" CORE_INSTALL = replace include lib daemons tools xml # Only these will get built with a plain "make" or "make clean" CORE = $(CORE_INSTALL) cts SUBDIRS = $(CORE) devel doc extra maint tests AM_CPPFLAGS = -I$(top_srcdir)/include doc_DATA = README.markdown COPYING licensedir = $(docdir)/licenses/ dist_license_DATA = $(wildcard licenses/*) # Scratch file for ad-hoc testing EXTRA_PROGRAMS = scratch nodist_scratch_SOURCES = scratch.c scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la # Directories that should be created on install and removed on uninstall ## owned by root:haclient, mode 0750 ROOT_DIRS = $(PACEMAKER_CONFIG_DIR) ## owned by hacluster:haclient, mode 0750 DAEMON_R_DIRS = $(CRM_CONFIG_DIR) \ $(CRM_CORE_DIR) \ $(CRM_BLACKBOX_DIR) ## owned by hacluster:haclient, mode 0770 DAEMON_RW_DIRS = $(CRM_BUNDLE_DIR) \ $(CRM_LOG_DIR) core: @echo "Building only core components and tests: $(CORE)" @for subdir in $(CORE); do \ echo "Building $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir all || exit 1; \ done core-install: @echo "Installing only core components: $(CORE_INSTALL)" @for subdir in $(CORE_INSTALL); do \ echo "Installing $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir install || exit 1; \ done core-clean: @echo "Cleaning only core components and tests: $(CORE)" @for subdir in $(CORE); do \ echo "Cleaning $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir clean || exit 1; \ done install-exec-local: for DIR in $(ROOT_DIRS) $(DAEMON_R_DIRS); do \ $(INSTALL) -d -m 750 "$(DESTDIR)/$$DIR"; \ done for DIR in $(DAEMON_RW_DIRS); do \ $(INSTALL) -d -m 770 "$(DESTDIR)/$$DIR"; \ done -for DIR in $(ROOT_DIRS); do \ chgrp $(CRM_DAEMON_GROUP) "$(DESTDIR)/$$DIR"; \ done -for DIR in $(DAEMON_R_DIRS) $(DAEMON_RW_DIRS); do \ chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) "$(DESTDIR)/$$DIR"; \ done # Remove created directories only if they're empty uninstall-hook: -for DIR in $(ROOT_DIRS) $(DAEMON_R_DIRS) $(DAEMON_RW_DIRS); do \ rmdir "$(DESTDIR)/$$DIR"; \ done clean-generic: -rm -f *.tar.bz2 *.sed PACKAGE ?= pacemaker # In a normal build, this file is included by GNUmakefile, which serves as the # "real" makefile. But in a VPATH build, GNUmakefile won't exist in the build # tree, and this file will be the "real" makefile. EXTRA_CLEAN_TARGETS handles # both cases: GNUmakefile defines it before including this file, so the # clean-local target can clean up files created by GNUmakefile targets. # If this file is used alone, the variable will be undefined. clean-local: $(EXTRA_CLEAN_TARGETS) -rm -f scratch $(builddir)/$(PACKAGE)-*.tar.gz distclean-local: -rm -rf libltdl autom4te.cache diff --git a/configure.ac b/configure.ac index 62434a368d..14ea3cbd83 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2087 +1,1975 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2020 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. 
dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) -AC_CONFIG_MACRO_DIR([m4]) +dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08). +dnl Once we can require that version, we can simplify this, and no longer +dnl need ACLOCAL_AMFLAGS in Makefile.am. +m4_ifdef([AC_CONFIG_MACRO_DIRS], + [AC_CONFIG_MACRO_DIRS([m4])], + [AC_CONFIG_MACRO_DIR([m4])]) + AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services -m4_include([version.m4]) +m4_include([m4/version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" -AC_CONFIG_AUX_DIR(.) +LT_CONFIG_LTDL_DIR([libltdl]) +AC_CONFIG_AUX_DIR([libltdl/config]) AC_CANONICAL_HOST -dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) +dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects]) dnl Require pkg-config (with a minimum version) PKG_PROG_PKG_CONFIG(0.18) AS_IF([test "x${PKG_CONFIG}" != x], [], - [AC_MSG_ERROR([pkgconfig must be installed to build ${PACKAGE}])]) + [AC_MSG_FAILURE([Could not find required build tool pkg-config (0.18 or later)])]) dnl PKG_NOARCH_INSTALLDIR is not available prior to pkg-config 0.27 and dnl pkgconf 0.8.10 (uncomment next line to mimic that scenario) dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) m4_ifndef([PKG_NOARCH_INSTALLDIR], [ AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) ]) ]) PKG_NOARCH_INSTALLDIR dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd GLIB_TESTS dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== -AC_PROG_CC dnl Can force other with environment variable "CC". +dnl A particular compiler can be forced by setting the CC environment variable +AC_PROG_CC + +dnl Use at least C99 if possible. This will generate an "obsolete" warning +dnl since autoconf 2.70, but is needed for older versions. 
AC_PROG_CC_STDC -AC_PROG_CXX dnl C++ is not needed for build, just maintainer utilities + +dnl C++ is not needed for build, just maintainer utilities +AC_PROG_CXX dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that dnl the C compiler is working. ... The core part of the gnulib checks are done dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL dnl as a dependency. gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT # --enable-new-dtags: Use RUNPATH instead of RPATH. # It is necessary to have this done before libtool does linker detection. # See also: https://github.com/kronosnet/kronosnet/issues/107 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags], [AM_LDFLAGS=-Wl,--enable-new-dtags], [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])]) AC_SUBST([AM_LDFLAGS]) saved_LDFLAGS="$LDFLAGS" LDFLAGS="$AM_LDFLAGS $LDFLAGS" LT_INIT([dlopen]) LDFLAGS="$saved_LDFLAGS" LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" - AC_MSG_CHECKING(whether $CC supports "$@") + AC_MSG_CHECKING([whether $CC supports $@]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], - [RC=0; AC_MSG_RESULT(yes)], - [RC=1; AC_MSG_RESULT(no)]) + [RC=0; AC_MSG_RESULT([yes])], + [RC=1; AC_MSG_RESULT([no])]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } dnl =============================================== dnl Configure Options dnl =============================================== dnl Actual library checks come later, but pkg-config can be used here to grab dnl external values to use as defaults for configure options dnl --enable-* options -AC_ARG_ENABLE([ansi], - [AS_HELP_STRING([--enable-ansi], - [force GCC to compile to ANSI standard for older compilers. @<:@no@:>@])], -) - AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@yes@:>@])], ) AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])], -) - -AC_ARG_ENABLE([no-stack], - [AS_HELP_STRING([--enable-no-stack], - [build only the scheduler and its requirements @<:@no@:>@])], + [], + [enable_quiet=no], ) AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart @<:@try@:>@])], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])], [], [enable_systemd=try], ) AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) -# By default, we add symlinks at the pre-2.0.0 daemon name locations, so that: -# (1) tools that directly invoke those names for metadata etc. will still work -# (2) this installation can be used in a bundle container image used with -# cluster hosts running Pacemaker 1.1.17+ -# If you know your target systems will not have any need for it, you can -# disable this option. 
Once the above use cases are no longer in wide use, we -# can disable this option by default, and once we no longer want to support -# them at all, we can drop the option altogether. +# Add an option to create symlinks at the pre-2.0.0 daemon name locations, so +# that users and tools can continue to invoke those names directly (e.g., for +# meta-data). This option will be deprecated in a future release. AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], - [add symlinks for old daemon names @<:@yes@:>@])], + [add symlinks for old daemon names @<:@no@:>@])], [ LEGACY_LINKS="${enableval}" ], - [ LEGACY_LINKS=yes ], + [ LEGACY_LINKS=no ], ) AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes") dnl --with-* options AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACKAGE_VERSION="$withval" ])] ) VERSION_ARG(VERSION_NUMBER) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer])], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios remote monitoring])], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) AC_ARG_WITH([acl], [AS_HELP_STRING([--with-acl], [support CIB ACL])], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets])], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) SUPPORT_PROFILING=0 AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations for effective profiling])], [ SUPPORT_PROFILING=$withval ] ) AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations for effective profiling])], [ SUPPORT_COVERAGE=$withval ] ) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], [address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@])], [ BUG_URL="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker 
bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])], [ SANITIZERS="$withval" ], [ SANITIZERS="" ]) -dnl The not-yet-released autoconf 2.70 will have a --runstatedir option. -dnl Until that's available, emulate it with our own --with-runstatedir. +dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users +dnl have an older version, they can use our --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) dnl This defaults to /usr/lib rather than libdir because it's determined by the dnl OCF project and not pacemaker. Even if a user wants to install pacemaker to dnl /usr/local or such, the OCF agents will be expected in their usual dnl location. However, we do give the user the option to override it. OCF_ROOT_DIR="/usr/lib/ocf" AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], [OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@/usr/lib/ocf@:>@])], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) dnl Get default from fence-agents if available PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"], [PCMK__FENCE_BINDIR="$sbindir"]) AC_ARG_WITH([fence-bindir], [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([ directory for executable fence agents @<:@value from fence-agents package if available otherwise SBINDIR@:>@]))], [ PCMK__FENCE_BINDIR="$withval" ] ) AC_SUBST(PCMK__FENCE_BINDIR) CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) -dnl Deprecated options - -AC_ARG_WITH([pkg-name], - [AS_HELP_STRING([--with-pkg-name=name], - [deprecated and unused (will be removed in a future release)])], -) - -AC_ARG_WITH([pkgname], - [AS_HELP_STRING([--with-pkgname=name], - [deprecated and unused (will be removed in a future release)])], -) - - dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", [Current pacemaker version]) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. 
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) AC_PROG_LN_S AC_PROG_MKDIR_P -if cc_supports_flag -Werror; then - WERROR="-Werror" -else - WERROR="" -fi +AS_IF([cc_supports_flag -Werror], [WERROR="-Werror"], [WERROR=""]) # Normalize enable_fatal_warnings (defaulting to yes, when compiler supports it) if test "x${enable_fatal_warnings}" != "xno" ; then if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then enable_fatal_warnings=yes else - AC_MSG_NOTICE(Compiler does not support fatal warnings) + AC_MSG_NOTICE([Compiler does not support fatal warnings]) enable_fatal_warnings=no fi fi -INIT_EXT="" -echo Our Host OS: $host_os/$host - -AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) -case $prefix in - NONE) - prefix=/usr - dnl Fix default variables - "prefix" variable if not specified - if test "$localstatedir" = "\${prefix}/var"; then - localstatedir="/var" - fi - if test "$sysconfdir" = "\${prefix}/etc"; then - sysconfdir="/etc" - fi - ;; -esac +AC_MSG_NOTICE([Sanitizing prefix: ${prefix}]) +AS_IF([test "$prefix" = "NONE"], + [ + prefix=/usr + dnl Fix default variables - "prefix" variable if not specified + AS_IF([test "$localstatedir" = "\${prefix}/var"], + [localstatedir="/var"]) + AS_IF([test "$sysconfdir" = "\${prefix}/etc"], + [sysconfdir="/etc"]) + ]) -AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) +AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}]) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac -AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) +AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}]) case $INITDIR in prefix) INITDIR=$prefix;; "") - AC_MSG_CHECKING(which init (rc) directory to use) + AC_MSG_CHECKING([which init (rc) directory to use]) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done - AC_MSG_RESULT($INITDIR) + AC_MSG_RESULT([$INITDIR]) ;; esac AC_SUBST(INITDIR) -AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) +AC_MSG_NOTICE([Sanitizing libdir: ${libdir}]) case $libdir in prefix|NONE) - AC_MSG_CHECKING(which lib directory to use) + AC_MSG_CHECKING([which lib directory to use]) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done - AC_MSG_RESULT($libdir); + AC_MSG_RESULT([$libdir]); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
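As a small self-contained illustration (assumed values, not from the patch) of what the eval expansion that follows accomplishes — turning autoconf's unexpanded '${prefix}'-relative defaults into literal paths that are safe to embed in #defines and Python scripts:

    # With prefix sanitized to /usr, nested defaults expand to literal paths.
    prefix=/usr
    exec_prefix='${prefix}'
    libexecdir='${exec_prefix}/libexec'
    eval exec_prefix="`eval echo ${exec_prefix}`"   # now /usr
    eval libexecdir="`eval echo ${libexecdir}`"     # now /usr/libexec
    echo "$libexecdir"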
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables if [ test "x${runstatedir}" = "x" ]; then if [ test "x${pcmk_runstatedir}" = "x" ]; then runstatedir="${localstatedir}/run" else runstatedir="${pcmk_runstatedir}" fi fi eval runstatedir="$(eval echo ${runstatedir})" AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) if test x"${CRM_LOG_DIR}" = x""; then CRM_LOG_DIR="${localstatedir}/log/pacemaker" fi AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) if test x"${CRM_BUNDLE_DIR}" = x""; then CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles" fi AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) eval PCMK__FENCE_BINDIR="`eval echo ${PCMK__FENCE_BINDIR}`" AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR", [Location for executable fence agents]) -if test x"${PCMK_GNUTLS_PRIORITIES}" = x""; then - AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities]) -fi +AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [], + [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])]) AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) if test x"${BUG_URL}" = x""; then BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker" fi AC_SUBST(BUG_URL) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! 
-d "$dirname" then - AC_MSG_WARN([$j directory ($dirname) does not exist!]) + AC_MSG_WARN([$j directory ($dirname) does not exist (yet)]) fi done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include ]]) ], [[#define _GNU_SOURCE #include ]]) ], [], [[#include ]]) ]) -if test -z "${us_auth}"; then +AS_IF([test -z "${us_auth}"], [ # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([US_AUTH_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([US_AUTH_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ - AC_MSG_ERROR([No way to authenticate a Unix socket peer]) + AC_MSG_FAILURE([No way to authenticate a Unix socket peer]) ], [[#include ]]) ]) -fi - -dnl This OS-based decision-making is poor autotools practice; -dnl feature-based mechanisms are strongly preferred. -dnl -dnl So keep this section to a bare minimum; regard as a "necessary evil". +]) +dnl OS-based decision-making is poor autotools practice; feature-based +dnl mechanisms are strongly preferred. Keep this section to a bare minimum; +dnl regard as a "necessary evil". +INIT_EXT="" +PROCFS=0 case "$host_os" in + dnl Solaris and some *BSD versions support procfs but not files we need *bsd*) - AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) INIT_EXT=".sh" ;; - *solaris*) - AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) - ;; *linux*) - AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) + PROCFS=1 ;; darwin*) - AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) -AC_MSG_NOTICE(Host CPU: $host_cpu) +AC_DEFINE_UNQUOTED([SUPPORT_PROCFS], [$PROCFS], + [Define to 1 if procfs is supported]) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac # C99 doesn't guarantee uint64_t type and related format specifiers, but # prerequisites, corosync + libqb, use that widely, so the target platforms # are already pre-constrained to those "64bit-clean" (doesn't imply native # bit width) and hence we deliberately refrain from artificial surrogates # (sans manipulation through cached values). 
AC_CACHE_VAL( [pcmk_cv_decl_inttypes], [ AC_CHECK_DECLS( [PRIu64, PRIu32, PRIx32, SCNu64], [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"], [ # test shall only react on "no" cached result & error out respectively if test "x$ac_cv_have_decl_PRIu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)]) elif test "x$ac_cv_have_decl_PRIu32" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)]) elif test "x$ac_cv_have_decl_PRIx32" = xno; then AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)]) elif test "x$ac_cv_have_decl_SCNu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)]) fi ], [[#include ]] ) ] ) ( set $pcmk_cv_decl_inttypes AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T]) AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T]) AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T]) AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS]) ) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) +AS_IF([test "x${LIBTOOL}" != "x"], [], + [AC_MSG_FAILURE([Could not find required build tool libtool (or equivalent)])]) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path -if test x"${PYTHON}" != x""; then - AC_PATH_PROG([PYTHON], [$PYTHON]) -fi +AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])]) -case "x$PYTHON" in - x*python3*|x*platform-python*) - dnl When used with Python 3, Pacemaker requires a minimum of 3.2 - AM_PATH_PYTHON([3.2]) - ;; - *) - dnl Otherwise, Pacemaker requires a minimum of 2.7 - AM_PATH_PYTHON([2.7]) - ;; -esac +dnl Pacemaker requires a minimum Python version of 3.2 +AM_PATH_PYTHON([3.2]) AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) -dnl BASH is already an environment variable, so use something else + +dnl Bash is needed for building man pages and running regression tests. +dnl BASH is already an environment variable, so use something else. 
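Ahead of the MANPAGE_XSLT logic a little further below, a standalone sketch (mirroring the patch; assumes libxml2's xmlcatalog and a populated system XML catalog) of how the DocBook-to-manpage stylesheet is located before falling back to a filesystem search:

    # Query the default XML catalog for a local copy of the DocBook manpages
    # stylesheet; a hit is printed as a file:// URI, which sed then strips.
    DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current'
    DOCBOOK_XSL_PATH='manpages/docbook.xsl'
    xmlcatalog "" "${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH}" | sed -n 's|^file://||p;q'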
AC_PATH_PROG([BASH_PATH], [bash]) +AS_IF([test "x${BASH_PATH}" != "x"], [], + [AC_MSG_FAILURE([Could not find required build tool bash])]) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) -if test x"${LIBTOOL}" = x""; then - AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) -fi - -dnl Bash is needed for building man pages and running regression tests -if test x"${BASH_PATH}" = x""; then - AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE}) -fi - AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then - AC_MSG_CHECKING(docbook to manpage transform) + AC_MSG_CHECKING([for DocBook-to-manpage transform]) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) - for d in ${DIRS}; do + for d in ${DIRS} + do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi -AC_MSG_RESULT($MANPAGE_XSLT) +AC_MSG_RESULT([$MANPAGE_XSLT]) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""]) +AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: -for PATH_DIR in $PATH; do +for PATH_DIR in $PATH +do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then $GETOPT_PATH -T >/dev/null 2>/dev/null if test $? -eq 4; then break fi fi GETOPT_PATH="" done IFS=$IFS_orig -if test -n "$GETOPT_PATH"; then - AC_MSG_RESULT([$GETOPT_PATH]) -else - AC_MSG_RESULT([no]) - AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt) -fi +AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])], + [ + AC_MSG_RESULT([no]) + AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt]) + ]) AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. 
dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) +dnl UUID functions may be in the standard library or a separate -luuid +AC_CHECK_LIB([uuid], [uuid_parse]) + AC_CHECK_FUNCS([sched_setscheduler]) if test "$ac_cv_func_sched_setscheduler" != yes; then PC_LIBS_RT="" else PC_LIBS_RT="-lrt" fi AC_SUBST(PC_LIBS_RT) -AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary -AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions - -AC_CHECK_HEADERS(uuid/uuid.h) - -if test "x$ac_cv_func_uuid_unparse" != xyes; then - AC_MSG_ERROR(You do not have the libuuid development package installed) -fi - -# Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc. -PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0], +# Require glib 2.32.0 (2012-03) or later +PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.32.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi +PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], + [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" + LIBS="${LIBS} ${LIBXML2_LIBS}"]) + +AC_CHECK_LIB([xslt], [xsltApplyStylesheet], [], + [AC_MSG_FAILURE([Could not find required C library libxslt])]) + dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) 
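The comment above is the rationale for wrapping the header checks in the project's fatal-warning flags. A self-contained sketch (illustrative only, not from the Pacemaker tree) of the failure mode it guards against: a header that merely emits a #warning still compiles on its own, but the same diagnostic is fatal once -Werror is in effect, so a header check run without $WERROR could report a header usable that the real build would then reject.

    /* example.c -- pretend the #warning comes from a deprecated system header */
    #warning "this header is deprecated; use the replacement API instead"

    #include <stdio.h>

    int main(void)
    {
        puts("cc -c example.c          -> succeeds, warning only");
        puts("cc -Werror -c example.c  -> fails, warning promoted to error");
        return 0;
    }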
-if test "x${enable_fatal_warnings}" = xyes ; then - cc_temp_flags "$CFLAGS $WERROR" -fi -AC_CHECK_HEADERS(arpa/inet.h) -AC_CHECK_HEADERS(ctype.h) -AC_CHECK_HEADERS(dirent.h) -AC_CHECK_HEADERS(errno.h) -AC_CHECK_HEADERS(getopt.h) -AC_CHECK_HEADERS(glib.h) -AC_CHECK_HEADERS(grp.h) -AC_CHECK_HEADERS(limits.h) -AC_CHECK_HEADERS(linux/swab.h) -AC_CHECK_HEADERS(malloc.h) -AC_CHECK_HEADERS(netdb.h) -AC_CHECK_HEADERS(netinet/in.h) -AC_CHECK_HEADERS(netinet/ip.h) -AC_CHECK_HEADERS(pwd.h) -AC_CHECK_HEADERS(sgtty.h) -AC_CHECK_HEADERS(signal.h) -AC_CHECK_HEADERS(stdarg.h) -AC_CHECK_HEADERS(stddef.h) -AC_CHECK_HEADERS(stdio.h) -AC_CHECK_HEADERS(stdlib.h) -AC_CHECK_HEADERS(string.h) -AC_CHECK_HEADERS(strings.h) -AC_CHECK_HEADERS(sys/dir.h) -AC_CHECK_HEADERS(sys/ioctl.h) -AC_CHECK_HEADERS(sys/param.h) -AC_CHECK_HEADERS(sys/reboot.h) -AC_CHECK_HEADERS(sys/resource.h) -AC_CHECK_HEADERS(sys/socket.h) -AC_CHECK_HEADERS(sys/signalfd.h) -AC_CHECK_HEADERS(sys/sockio.h) -AC_CHECK_HEADERS(sys/stat.h) -AC_CHECK_HEADERS(sys/time.h) -AC_CHECK_HEADERS(sys/types.h) -AC_CHECK_HEADERS(sys/utsname.h) -AC_CHECK_HEADERS(sys/wait.h) -AC_CHECK_HEADERS(time.h) -AC_CHECK_HEADERS(unistd.h) -if test "x${enable_fatal_warnings}" = xyes ; then - cc_restore_flags -fi - -dnl These headers need prerequisites before the tests will pass -dnl AC_CHECK_HEADERS(net/if.h) - -PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], - [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" - LIBS="${LIBS} ${LIBXML2_LIBS}"]) -AC_CHECK_HEADERS(libxml/xpath.h) -if test "$ac_cv_header_libxml_xpath_h" != "yes"; then - AC_MSG_ERROR(libxml development headers not found) -fi - -AC_CHECK_LIB(xslt, xsltApplyStylesheet, [], - AC_MSG_ERROR(Unsupported libxslt library version)) -AC_CHECK_HEADERS(libxslt/xslt.h) -if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then - AC_MSG_ERROR(libxslt development headers not found) -fi - -AC_CACHE_CHECK(whether __progname and __progname_full are available, - pf_cv_var_progname, - AC_TRY_LINK([extern char *__progname, *__progname_full;], - [__progname = "foo"; __progname_full = "foo bar";], - pf_cv_var_progname="yes", pf_cv_var_progname="no")) - -if test "$pf_cv_var_progname" = "yes"; then - AC_DEFINE(HAVE___PROGNAME,1,[ ]) -fi +AS_IF([test "x${enable_fatal_warnings}" = xyes], + [cc_temp_flags "$CFLAGS $WERROR"]) + +# Optional headers (inclusion of these should be conditional in C code) +AC_CHECK_HEADERS([getopt.h]) +AC_CHECK_HEADERS([linux/swab.h]) +AC_CHECK_HEADERS([stddef.h]) +AC_CHECK_HEADERS([sys/signalfd.h]) +AC_CHECK_HEADERS([uuid/uuid.h]) +AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h]) + +# Required headers +REQUIRE_HEADER([arpa/inet.h]) +REQUIRE_HEADER([ctype.h]) +REQUIRE_HEADER([dirent.h]) +REQUIRE_HEADER([errno.h]) +REQUIRE_HEADER([glib.h]) +REQUIRE_HEADER([grp.h]) +REQUIRE_HEADER([limits.h]) +REQUIRE_HEADER([netdb.h]) +REQUIRE_HEADER([netinet/in.h]) +REQUIRE_HEADER([netinet/ip.h], [ + #include + #include +]) +REQUIRE_HEADER([pwd.h]) +REQUIRE_HEADER([signal.h]) +REQUIRE_HEADER([stdio.h]) +REQUIRE_HEADER([stdlib.h]) +REQUIRE_HEADER([string.h]) +REQUIRE_HEADER([strings.h]) +REQUIRE_HEADER([sys/ioctl.h]) +REQUIRE_HEADER([sys/param.h]) +REQUIRE_HEADER([sys/reboot.h]) +REQUIRE_HEADER([sys/resource.h]) +REQUIRE_HEADER([sys/socket.h]) +REQUIRE_HEADER([sys/stat.h]) +REQUIRE_HEADER([sys/time.h]) +REQUIRE_HEADER([sys/types.h]) +REQUIRE_HEADER([sys/utsname.h]) +REQUIRE_HEADER([sys/wait.h]) +REQUIRE_HEADER([time.h]) +REQUIRE_HEADER([unistd.h]) +REQUIRE_HEADER([libxml/xpath.h]) +REQUIRE_HEADER([libxslt/xslt.h]) + +AS_IF([test 
"x${enable_fatal_warnings}" = xyes], [cc_restore_flags]) + +AC_CHECK_FUNCS([uuid_unparse], [], + [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])]) + +AC_CACHE_CHECK([whether __progname and __progname_full are available], + [pf_cv_var_progname], AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]], + [[__progname = "foo"; __progname_full = "foo bar";]], + [pf_cv_var_progname="yes"], + [pf_cv_var_progname="no"]) + ])) +AS_IF([test "$pf_cv_var_progname" = "yes"], [AC_DEFINE(HAVE___PROGNAME,1,[ ])]) dnl ======================================================================== dnl Generic declarations dnl ======================================================================== -AC_CHECK_DECLS([CLOCK_MONOTONIC], [], [], [[ +AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [], [[ #include ]]) dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include ]) dnl ======================================================================== dnl Functions dnl ======================================================================== -AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) -AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) +AC_CHECK_FUNCS(getopt) AC_CACHE_CHECK(whether sscanf supports %m, pf_cv_var_sscanf, AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include const char *s = "some-command-line-arg"; int main(int argc, char **argv) { char *name = NULL; int n = sscanf(s, "%ms", &name); return n == 1 ? 
0 : 1; } ]])], pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) -if test "$pf_cv_var_sscanf" = "yes"; then - AC_DEFINE(SSCANF_HAS_M, 1, [ ]) -fi +AS_IF([test "$pf_cv_var_sscanf" = "yes"], [AC_DEFINE(SSCANF_HAS_M, 1, [ ])]) dnl ======================================================================== dnl bzip2 dnl ======================================================================== -AC_CHECK_HEADERS(bzlib.h) -AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) - -if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then - AC_MSG_ERROR(BZ2 libraries not found) -fi - -if test x$ac_cv_header_bzlib_h != xyes; then - AC_MSG_ERROR(BZ2 Development headers not found) -fi +REQUIRE_HEADER([bzlib.h]) +AC_CHECK_LIB([bz2], [BZ2_bzBuffToBuffCompress],, + [AC_MSG_FAILURE(Could not find required C library libbz2)]) dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) -AC_TRY_COMPILE([#include ],[sighandler_t *f;], -has_sighandler_t=yes,has_sighandler_t=no) -AC_MSG_RESULT($has_sighandler_t) -if test "$has_sighandler_t" = "yes" ; then - AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) -fi +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[sighandler_t *f;]])], + [ + AC_MSG_RESULT([yes]) + AC_DEFINE([HAVE_SIGHANDLER_T], [1], + [Define to 1 if sighandler_t is available]) + ], + [AC_MSG_RESULT([no])]) dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl -dnl ncurse takes precedence. +dnl ncurses takes precedence. dnl -AC_CHECK_HEADERS(curses.h) -AC_CHECK_HEADERS(curses/curses.h) -AC_CHECK_HEADERS(ncurses.h) -AC_CHECK_HEADERS(ncurses/ncurses.h) +AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h]) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" -if test "$ac_cv_header_ncurses_h" = "yes"; then +AS_IF([test "$ac_cv_header_ncurses_h" = "yes"], [ AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" -fi +]) -if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then +AS_IF([test "$ac_cv_header_ncurses_ncurses_h" = "yes"], [ AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" -fi +]) dnl Only look for non-n-library if there was no n-library. -if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then +AS_IF([test X"$CURSESLIBS" = X"" && test "$ac_cv_header_curses_h" = "yes"], [ AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" -fi +]) dnl Only look for non-n-library if there was no n-library. 
-if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then +AS_IF([test X"$CURSESLIBS" = X"" && test "$ac_cv_header_curses_curses_h" = "yes"], [ AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" -fi +]) if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility -if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then +AS_IF([test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual], [ ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" cc_temp_flags "-Wcast-qual $WERROR" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 - if cc_supports_flag -fPIC; then - CFLAGS="$CFLAGS -fPIC" - fi + AS_IF([cc_supports_flag -fPIC], [CFLAGS="$CFLAGS -fPIC"]) - AC_MSG_CHECKING(whether printw() requires argument of "const char *") + AC_MSG_CHECKING([whether curses library is compatible]) AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], - [pcmk_cv_compatible_printw=yes], - [pcmk_cv_compatible_printw=no] + [AC_MSG_RESULT([yes])], + [ + AC_MSG_RESULT([no]) + AC_MSG_WARN(m4_normalize([Disabling curses because the printw() + function of your (n)curses library is old. + If you wish to enable curses, update to a + newer version (ncurses 5.4 or later is + recommended, available from + https://invisible-island.net/ncurses/) + ])) + AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1], + [Define to 1 if curses library has incompatible printw()]) + ] ) LIBS=$ac_save_LIBS cc_restore_flags - - AC_MSG_RESULT([$pcmk_cv_compatible_printw]) - - if test "$pcmk_cv_compatible_printw" = no; then - AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. 
You can get the library from http://www.gnu.org/software/ncurses/.]) - AC_MSG_NOTICE([Disabling curses]) - AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) - fi -fi +]) AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== -AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) +CFLAGS_ORIG="$CFLAGS" case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac -AC_MSG_NOTICE(New CFLAGS: $CFLAGS) +AS_IF([test ${SUPPORT_PROFILING} -eq 0], [], + [ + AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG]) + AC_MSG_NOTICE([CFLAGS after: $CFLAGS]) + ]) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== -if test x${enable_no_stack} = xyes; then - SUPPORT_CS=no -fi - -PKG_CHECK_MODULES(libqb, libqb >= 0.13) +PKG_CHECK_MODULES(libqb, libqb >= 0.17) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" -dnl libqb 2.02+ (2020-10) +dnl libqb 2.0.2+ (2020-10) AC_CHECK_FUNCS(qb_ipcc_auth_get, AC_DEFINE(HAVE_IPCC_AUTH_GET, 1, [Have qb_ipcc_auth_get function])) -PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" - -dnl libqb 0.17.0+ (2014-02) -AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, - AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, - [Have qb_ipcc_get_buffer_size function])) - dnl libqb 2.0.0+ (2020-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi -AC_CHECK_HEADERS(stonith/stonith.h) -if test "$ac_cv_header_stonith_stonith_h" = "yes"; then - dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols - dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb - AC_CHECK_LIB(pils, PILLoadPlugin) - AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) - PCMK_FEATURES="$PCMK_FEATURES lha-fencing" -fi +AC_CHECK_HEADERS([stonith/stonith.h], + [ + AC_CHECK_LIB([pils], [PILLoadPlugin]) + AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel]) + PCMK_FEATURES="$PCMK_FEATURES lha" + ]) AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" 
AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) if test x"${CRM_DAEMON_USER}" = x""; then CRM_DAEMON_USER="hacluster" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) if test x"${CRM_DAEMON_GROUP}" = x""; then CRM_DAEMON_GROUP="haclient" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries]) AC_PATH_PROGS(GIT, git false) -AC_MSG_CHECKING(build version) +AC_MSG_CHECKING([build version]) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then - AC_MSG_RESULT(archive hash: $BUILD_VERSION) -elif test -x $GIT -a -d .git; then + AC_MSG_RESULT([$BUILD_VERSION (archive hash)]) +elif test -x $GIT && test -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` - AC_MSG_RESULT(git hash: $BUILD_VERSION) + AC_MSG_RESULT([$BUILD_VERSION (git hash)]) else # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` - AC_MSG_RESULT(directory based hash: $BUILD_VERSION) + AC_MSG_RESULT([$BUILD_VERSION (directory name)]) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"], [HAVE_dbus=0]) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) AC_CHECK_TYPES([DBusBasicValue],,,[[#include ]]) if test 
$HAVE_dbus = 0; then PC_NAME_DBUS="" else PC_NAME_DBUS="dbus-1" fi AC_SUBST(PC_NAME_DBUS) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then - AC_MSG_FAILURE([cannot enable systemd without DBus]) + AC_MSG_FAILURE([Cannot support systemd resources without DBus]) else enable_systemd=no fi fi if test $(echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then if test "x${enable_systemd}" = xyes; then - AC_MSG_FAILURE([cannot enable systemd without clock_gettime(CLOCK_MONOTONIC, ...)]) + AC_MSG_FAILURE([Cannot support systemd resources without clock_gettime(CLOCK_MONOTONIC, ...)]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then - AC_MSG_CHECKING([for systemd version query result via dbus-send]) + AC_MSG_CHECKING([for systemd version (using dbus-send)]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ - || echo "this borked"; } | tail -n1) + || echo "version unavailable"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) - if test "x${ret}" != xborked \ + if test "x${ret}" != xunavailable \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) HAVE_systemd=0 -if test "x${enable_systemd}" = xyes; then +AS_IF([test "x${enable_systemd}" = xyes], [ HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) - if test "x${systemdsystemunitdir}" = x""; then - AC_MSG_FAILURE([cannot enable systemd when systemdsystemunitdir unresolved]) - fi -fi + AS_IF([test "x${systemdsystemunitdir}" != x""], [], + [AC_MSG_FAILURE([Cannot enable systemd because systemdsystemunitdir is unknown])]) +]) AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then - AC_MSG_FAILURE([cannot enable Upstart without DBus]) + AC_MSG_FAILURE([Cannot support Upstart resources without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then - AC_MSG_CHECKING([for Upstart version query result via dbus-send]) + AC_MSG_CHECKING([for Upstart version (using dbus-send)]) ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ - || echo "this borked"; } | tail -n1) + || echo "version unavailable"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) - if test "x${ret}" != xborked \ + if test "x${ret}" != xunavailable \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi 
fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) HAVE_upstart=0 if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) AC_SUBST(SUPPORT_UPSTART) case $SUPPORT_NAGIOS in 1|yes|true) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then - AC_MSG_FAILURE([cannot enable nagios without clock_gettime(CLOCK_MONOTONIC, ...)]) + AC_MSG_FAILURE([Cannot support nagios resources without clock_gettime(CLOCK_MONOTONIC, ...)]) fi SUPPORT_NAGIOS=1 ;; try) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then SUPPORT_NAGIOS=0 else SUPPORT_NAGIOS=1 fi ;; *) SUPPORT_NAGIOS=0 ;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_CS=no ;; esac -AC_MSG_CHECKING(for native corosync) +AC_MSG_CHECKING([for Corosync 2 or later]) COROSYNC_LIBS="" -if test $SUPPORT_CS = no; then - AC_MSG_RESULT(no (disabled)) - SUPPORT_CS=0 -else - AC_MSG_RESULT($SUPPORT_CS) - SUPPORT_CS=1 - PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal - PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal - PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal - PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal - PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal - - CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" - COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" - CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" - PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" - STACKS="$STACKS corosync-native" -fi +AS_IF([test $SUPPORT_CS = no], + [ + AC_MSG_RESULT([no (disabled)]) + SUPPORT_CS=0 + ], + [ + AC_MSG_RESULT([$SUPPORT_CS]) + SUPPORT_CS=1 + PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal + PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal + PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal + PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal + PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal + + CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" + COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" + CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" + PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap 
libcorosync_common libcpg libquorum" + STACKS="$STACKS corosync-ge-2" + ]) AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AC_SUBST(SUPPORT_COROSYNC) dnl dnl Cluster stack - Sanity dnl -if test x${enable_no_stack} = xyes; then - AC_MSG_NOTICE(No cluster stack supported, building only the scheduler) - PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" -else - AC_MSG_CHECKING(for supported stacks) - if test x"$STACKS" = x; then - AC_MSG_FAILURE(You must support at least one cluster stack) - fi - AC_MSG_RESULT($STACKS) - PCMK_FEATURES="$PCMK_FEATURES $STACKS" -fi +AS_IF([test "x$STACKS" != "x"], [AC_MSG_NOTICE([Supported stacks:${STACKS}])], + [AC_MSG_FAILURE([At least one cluster stack must be supported])]) + +PCMK_FEATURES="${PCMK_FEATURES}${STACKS}" -PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in - 1|yes|true) - missingisfatal=1 - ;; - try) - missingisfatal=0 + 1|yes|true|try) + SUPPORT_ACL=1 + PCMK_FEATURES="$PCMK_FEATURES acls" ;; *) - SUPPORT_ACL=no + SUPPORT_ACL=0 ;; esac - -AC_MSG_CHECKING(for acl support) -if test $SUPPORT_ACL = no; then - AC_MSG_RESULT(no (disabled)) - SUPPORT_ACL=0 -else - AC_MSG_RESULT($SUPPORT_ACL) - - AC_CHECK_FUNCS(qb_ipcs_connection_auth_set, SUPPORT_ACL=1, SUPPORT_ACL=0) - - if test $SUPPORT_ACL = 0; then - if test $missingisfatal = 0; then - AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) - else - AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0) - fi - fi -fi - -if test $SUPPORT_ACL = 1; then - PCMK_FEATURES="$PCMK_FEATURES acls" -fi - AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1 ;; *) SUPPORT_CIBSECRETS=0 ;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) -if test $SUPPORT_CIBSECRETS = 1; then +AS_IF([test $SUPPORT_CIBSECRETS = 1], [ PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) -fi +]) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== -dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29) -AC_CHECK_LIB(gnutls, gnutls_priority_set_direct) -if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then - AC_CHECK_HEADERS(gnutls/gnutls.h) - AC_CHECK_FUNCS([gnutls_sec_param_to_pk_bits]) dnl since 2.12.0 (2011-03-24) - if test "$ac_cv_header_gnutls_gnutls_h" != "yes"; then - PC_NAME_GNUTLS="" - else - PC_NAME_GNUTLS="gnutls" - fi +dnl Require GnuTLS >=2.12.0 (2011-03) to support Pacemaker Remote +AC_CHECK_LIB(gnutls, gnutls_sec_param_to_pk_bits) +AS_IF([test "$ac_cv_lib_gnutls_gnutls_sec_param_to_pk_bits" != ""], [ + AC_CHECK_HEADERS([gnutls/gnutls.h], + [ + 
PC_NAME_GNUTLS="gnutls" + PCMK_FEATURES="$PCMK_FEATURES remote" + ], + [PC_NAME_GNUTLS=""]) AC_SUBST(PC_NAME_GNUTLS) -fi - -dnl ======================================================================== -dnl PAM -dnl ======================================================================== - -AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) +]) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" -AC_MSG_CHECKING(for $SERVICELOG packages) +AC_MSG_CHECKING([for $SERVICELOG packages]) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" + PCMK_FEATURES="$PCMK_FEATURES servicelog" fi -AC_MSG_RESULT($SERVICELOG_EXISTS) +AC_MSG_RESULT([$SERVICELOG_EXISTS]) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIMPI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" -AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) +AC_MSG_CHECKING([for $SERVICELOG $OPENIPMI packages]) if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG then PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) + REQUIRE_HEADER([malloc.h]) OPENIPMI_SERVICELOG_EXISTS="yes" + PCMK_FEATURES="$PCMK_FEATURES ipmiservicelogd" fi -AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS) +AC_MSG_RESULT([$OPENIPMI_SERVICELOG_EXISTS]) AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") # --- ASAN/UBSAN/TSAN (see man gcc) --- # when using SANitizers, we need to pass the -fsanitize.. # to both CFLAGS and LDFLAGS. The CFLAGS/LDFLAGS must be # specified as first in the list or there will be runtime # issues (for example user has to LD_PRELOAD asan for it to work # properly). 
-if test -n "${SANITIZERS}"; then +AS_IF([test -n "${SANITIZERS}"], [ SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g') - for SANITIZER in $SANITIZERS; do - case $SANITIZER in - asan|ASAN) - SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" - SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" - AC_CHECK_LIB([asan],[main],,AC_MSG_ERROR([Unable to find libasan])) - ;; - ubsan|UBSAN) - SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" - SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" - AC_CHECK_LIB([ubsan],[main],,AC_MSG_ERROR([Unable to find libubsan])) - ;; - tsan|TSAN) - SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" - SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" - AC_CHECK_LIB([tsan],[main],,AC_MSG_ERROR([Unable to find libtsan])) - ;; - esac + for SANITIZER in $SANITIZERS + do + AS_CASE([$SANITIZER], + [asan|ASAN], [ + SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" + SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" + AC_CHECK_LIB([asan],[main],,AC_MSG_ERROR([Unable to find libasan])) + ], + [ubsan|UBSAN], [ + SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" + SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" + AC_CHECK_LIB([ubsan],[main],,AC_MSG_ERROR([Unable to find libubsan])) + ], + [tsan|TSAN], [ + SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" + SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" + AC_CHECK_LIB([tsan],[main],,AC_MSG_ERROR([Unable to find libtsan])) + ]) done -fi +]) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) CC_EXTRAS="" -if test "$GCC" != yes; then - CFLAGS="$CFLAGS -g" -else +AS_IF([test "$GCC" != yes], [CFLAGS="$CFLAGS -g"], [ CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. 
dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags - if cc_supports_flag "-Wformat-nonliteral"; then - gcc_format_nonliteral=yes - else - gcc_format_nonliteral=no - fi + AS_IF([cc_supports_flag "-Wformat-nonliteral"], + [gcc_format_nonliteral=yes], + [gcc_format_nonliteral=no]) # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). - EXTRA_FLAGS="-fgnu89-inline - -Wall - -Waggregate-return - -Wbad-function-cast - -Wcast-align - -Wdeclaration-after-statement - -Wendif-labels - -Wfloat-equal - -Wformat-security - -Wmissing-prototypes - -Wmissing-declarations - -Wnested-externs - -Wno-long-long - -Wno-strict-aliasing - -Wpointer-arith - -Wstrict-prototypes - -Wwrite-strings - -Wunused-but-set-variable - -Wunsigned-char" - - if test "x$gcc_diagnostic_push_pull" = "xyes"; then - AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], - [gcc can complain about nonliterals in format]) - EXTRA_FLAGS="$EXTRA_FLAGS - -Wformat=2 - -Wformat-nonliteral" - else - if test "x$gcc_format_nonliteral" = "xyes"; then - EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" - fi - fi + EXTRA_FLAGS="-fgnu89-inline" + EXTRA_FLAGS="$EXTRA_FLAGS -Wall" + EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return" + EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast" + EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align" + EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement" + EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels" + EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal" + EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security" + EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes" + EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations" + EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs" + EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long" + EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing" + EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith" + EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes" + EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings" + EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable" + EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char" + + AS_IF([test "x$gcc_diagnostic_push_pull" = "xyes"], + [ + AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], + [gcc can complain about nonliterals in format]) + EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" + ], + [test "x$gcc_format_nonliteral" = "xyes"], + [EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"]) # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do - if - cc_supports_flag $CC_EXTRAS $j - then - CC_EXTRAS="$CC_EXTRAS $j" - fi + AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"]) done - if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then - AC_MSG_NOTICE(Enabling ANSI Compatibility) - CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" - fi - - AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) -fi + 
AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}]) +]) dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi -if test "x${HARDENING}" = "xno"; then - AC_MSG_NOTICE([Hardening: explicitly disabled]) -elif test "x${HARDENING}" = "xyes" \ - || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then - dnl We'll figure out on our own... - CFLAGS_HARDENED_EXE= - CFLAGS_HARDENED_LIB= - LDFLAGS_HARDENED_EXE= - LDFLAGS_HARDENED_LIB= - relro=0 - pie=0 - bindnow=0 - # daemons incl. libs: partial RELRO - flag="-Wl,-z,relro" - CC_CHECK_LDFLAGS(["${flag}"], - [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; - LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; - relro=1]) - # daemons: PIE for both CFLAGS and LDFLAGS - if cc_supports_flag -fPIE; then - flag="-pie" - CC_CHECK_LDFLAGS(["${flag}"], - [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; - LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; - pie=1]) - fi - # daemons incl. 
libs: full RELRO if sensible + as-needed linking - # so as to possibly mitigate startup performance - # hit caused by excessive linking with unneeded - # libraries - if test "${relro}" = 1 && test "${pie}" = 1; then - flag="-Wl,-z,now" - CC_CHECK_LDFLAGS(["${flag}"], - [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; - LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; - bindnow=1]) - fi - if test "${bindnow}" = 1; then - flag="-Wl,--as-needed" - CC_CHECK_LDFLAGS(["${flag}"], - [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; - LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) - fi - # universal: prefer strong > all > default stack protector if possible - flag= - if cc_supports_flag -fstack-protector-strong; then - flag="-fstack-protector-strong" - elif cc_supports_flag -fstack-protector-all; then - flag="-fstack-protector-all" - elif cc_supports_flag -fstack-protector; then - flag="-fstack-protector" - fi - if test -n "${flag}"; then - CC_EXTRAS="${CC_EXTRAS} ${flag}" - stackprot=1 - fi - if test "${relro}" = 1 \ - || test "${pie}" = 1 \ - || test "${stackprot}" = 1; then - AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) - else - AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) - fi -else - AC_MSG_NOTICE([Hardening: using custom flags]) -fi +AS_IF([test "x${HARDENING}" = "xno"], + [AC_MSG_NOTICE([Hardening: explicitly disabled])], + [test "x${HARDENING}" = "xyes" || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0], + [ + dnl We'll figure out on our own... + CFLAGS_HARDENED_EXE= + CFLAGS_HARDENED_LIB= + LDFLAGS_HARDENED_EXE= + LDFLAGS_HARDENED_LIB= + relro=0 + pie=0 + bindnow=0 + # daemons incl. libs: partial RELRO + flag="-Wl,-z,relro" + CC_CHECK_LDFLAGS(["${flag}"], + [ + LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; + LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; + relro=1 + ]) + # daemons: PIE for both CFLAGS and LDFLAGS + AS_IF([cc_supports_flag -fPIE], + [ + flag="-pie" + CC_CHECK_LDFLAGS(["${flag}"], + [ + CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; + LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; + pie=1 + ]) + ]) + # daemons incl. 
libs: full RELRO if sensible + as-needed linking + # so as to possibly mitigate startup performance + # hit caused by excessive linking with unneeded + # libraries + AS_IF([test "${relro}" = 1 && test "${pie}" = 1], + [ + flag="-Wl,-z,now" + CC_CHECK_LDFLAGS(["${flag}"], + [ + LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; + LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; + bindnow=1 + ]) + ]) + AS_IF([test "${bindnow}" = 1], + [ + flag="-Wl,--as-needed" + CC_CHECK_LDFLAGS(["${flag}"], + [ + LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; + LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}" + ]) + ]) + # universal: prefer strong > all > default stack protector if possible + flag= + AS_IF([cc_supports_flag -fstack-protector-strong], + [flag="-fstack-protector-strong"], + [cc_supports_flag -fstack-protector-all], + [flag="-fstack-protector-all"], + [cc_supports_flag -fstack-protector], + [flag="-fstack-protector"]) + AS_IF([test -n "${flag}"], + [ + CC_EXTRAS="${CC_EXTRAS} ${flag}" + stackprot=1 + ]) + AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test "${stackprot}" = 1], + [AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}])], + [AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])]) + ], + [AC_MSG_NOTICE([Hardening: using custom flags])]) CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
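The comment above explains the ordering constraint; a rough sketch of why it matters follows. AC_CHECK_FUNCS probes for a symbol by compiling and linking a tiny program along these lines (simplified here, and not the literal file autoconf writes):

    /* conftest.c, approximately, for AC_CHECK_FUNCS(getopt) */
    char getopt ();          /* deliberately prototype-less declaration */

    int
    main (void)
    {
      return getopt ();
    }

With the plain CFLAGS used up to this point, the program links and the function is reported present. If -Werror together with options such as -Wstrict-prototypes were already in CFLAGS, the same probe would fail to compile and the function would be reported missing, which is why the warning flags are appended to CFLAGS only after all the AC_*FUNCS() checks have run.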
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then - AC_MSG_NOTICE(Enabling Fatal Warnings) + AC_MSG_NOTICE([Enabling fatal compiler warnings]) CFLAGS="$CFLAGS $WERROR" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output +AC_MSG_CHECKING([whether to suppress make details]) QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant fi - -AC_MSG_RESULT(Suppress make details: ${enable_quiet}) +AC_MSG_RESULT([${enable_quiet}]) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py]) AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy]) AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py]) AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test]) AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts]) AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli]) AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage]) AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec]) AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing]) AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher]) AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression]) AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler]) AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support]) AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh]) AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench]) AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) AC_CONFIG_FILES([extra/resources/SysInfo], [chmod +x extra/resources/SysInfo]) AC_CONFIG_FILES([extra/resources/ifspeed], [chmod +x extra/resources/ifspeed]) AC_CONFIG_FILES([extra/resources/o2cb], [chmod +x extra/resources/o2cb]) AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount]) AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master]) AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report]) AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby]) AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret]) AC_CONFIG_FILES([tools/pcmk_simtimes], [chmod +x tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ 
daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/sphinx/Makefile \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/operations/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/rules/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tests/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() +# Strip leading space from features list +PCMK_FEATURES=`echo -e "$PCMK_FEATURES" | sed -e 's/^ //'` + dnl ***************** dnl Configure summary dnl ***************** -AC_MSG_RESULT([]) -AC_MSG_RESULT([$PACKAGE configuration:]) -AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) -AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([ Prefix = ${prefix}]) -AC_MSG_RESULT([ Executables = ${sbindir}]) -AC_MSG_RESULT([ Man pages = ${mandir}]) -AC_MSG_RESULT([ Libraries = ${libdir}]) -AC_MSG_RESULT([ Header files = ${includedir}]) -AC_MSG_RESULT([ Arch-independent files = ${datadir}]) -AC_MSG_RESULT([ State information = ${localstatedir}]) -AC_MSG_RESULT([ System configuration = ${sysconfdir}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) -AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) -AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) -AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) -AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) -AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) -AC_MSG_RESULT([ Libraries = ${LIBS}]) -AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) -AC_MSG_RESULT([ Unix socket auth method = ${us_auth}]) +AC_MSG_NOTICE([]) +AC_MSG_NOTICE([$PACKAGE configuration:]) +AC_MSG_NOTICE([ Version = ${VERSION} (Build: $BUILD_VERSION)]) +AC_MSG_NOTICE([ Features = ${PCMK_FEATURES}]) +AC_MSG_NOTICE([]) +AC_MSG_NOTICE([ Prefix = ${prefix}]) +AC_MSG_NOTICE([ Executables = ${sbindir}]) +AC_MSG_NOTICE([ Man pages = ${mandir}]) +AC_MSG_NOTICE([ Libraries = ${libdir}]) +AC_MSG_NOTICE([ Header files = ${includedir}]) +AC_MSG_NOTICE([ Arch-independent files = ${datadir}]) +AC_MSG_NOTICE([ State information = ${localstatedir}]) +AC_MSG_NOTICE([ System 
configuration = ${sysconfdir}]) +AC_MSG_NOTICE([]) +AC_MSG_NOTICE([ HA group name = ${CRM_DAEMON_GROUP}]) +AC_MSG_NOTICE([ HA user name = ${CRM_DAEMON_USER}]) +AC_MSG_NOTICE([]) +AC_MSG_NOTICE([ CFLAGS = ${CFLAGS}]) +AC_MSG_NOTICE([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) +AC_MSG_NOTICE([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) +AC_MSG_NOTICE([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) +AC_MSG_NOTICE([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) +AC_MSG_NOTICE([ Libraries = ${LIBS}]) +AC_MSG_NOTICE([ Stack Libraries = ${CLUSTERLIBS}]) +AC_MSG_NOTICE([ Unix socket auth method = ${us_auth}]) diff --git a/cts/CIB.py b/cts/CIB.py index 755c258146..52ecbfcfc7 100644 --- a/cts/CIB.py +++ b/cts/CIB.py @@ -1,521 +1,518 @@ """ CIB generator for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - __copyright__ = "Copyright 2008-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import os import warnings import tempfile from cts.CTSvars import * class CibBase(object): def __init__(self, Factory, tag, _id, **kwargs): self.tag = tag self.name = _id self.kwargs = kwargs self.children = [] self.Factory = Factory def __repr__(self): return "%s-%s" % (self.tag, self.name) def add_child(self, child): self.children.append(child) def __setitem__(self, key, value): if value: self.kwargs[key] = value else: self.kwargs.pop(key, None) from cts.cib_xml import * class ConfigBase(object): cts_cib = None version = "unknown" Factory = None def __init__(self, CM, factory, tmpfile=None): self.CM = CM self.Factory = factory if not tmpfile: warnings.filterwarnings("ignore") f=tempfile.NamedTemporaryFile(delete=True) f.close() tmpfile = f.name warnings.resetwarnings() self.Factory.tmpfile = tmpfile def version(self): return self.version def NextIP(self): ip = self.CM.Env["IPBase"] if ":" in ip: (prefix, sep, suffix) = ip.rpartition(":") suffix = str(hex(int(suffix, 16)+1)).lstrip("0x") else: (prefix, sep, suffix) = ip.rpartition(".") suffix = str(int(suffix)+1) ip = prefix + sep + suffix self.CM.Env["IPBase"] = ip return ip.strip() class CIB12(ConfigBase): version = "pacemaker-1.2" counter = 1 def _show(self, command=""): output = "" (rc, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, None, ) for line in result: output += line self.Factory.debug("Generated Config: "+line) return output def NewIP(self, name=None, standard="ocf"): if self.CM.Env["IPagent"] == "IPaddr2": ip = self.NextIP() if not name: if ":" in ip: (prefix, sep, suffix) = ip.rpartition(":") name = "r"+suffix else: name = "r"+ip r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) r["ip"] = ip if ":" in ip: r["cidr_netmask"] = "64" r["nic"] = "eth0" else: r["cidr_netmask"] = "32" else: if not name: name = "r%s%d" % (self.CM.Env["IPagent"], self.counter) self.counter = self.counter + 1 r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) r.add_op("monitor", "5s") return r def get_node_id(self, node_name): """ Check the cluster configuration for a node ID. """ # We can't account for every possible configuration, # so we only return a node ID if: # * The node is specified in /etc/corosync/corosync.conf # with "ring0_addr:" equal to node_name and "nodeid:" # explicitly specified. # In all other cases, we return 0. 
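        # Illustrative (hypothetical) corosync.conf excerpt of the shape this
        # lookup is written for; real deployments may differ:
        #
        #   nodelist {
        #       node {
        #           ring0_addr: node1
        #           nodeid: 1
        #       }
        #   }
        #
        # For node_name "node1", the awk pipeline below should print "1".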
node_id = 0 # awkward command: use } as record separator # so each corosync.conf "object" is one record; # match the "node {" record that has "ring0_addr: node_name"; # then print the substring of that record after "nodeid:" (rc, output) = self.Factory.rsh(self.Factory.target, r"""awk -v RS="}" """ r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/""" r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}'""" r""" /etc/corosync/corosync.conf""" % node_name, None) if rc == 0 and len(output) == 1: try: node_id = int(output[0]) except ValueError: node_id = 0 return node_id def install(self, target): old = self.Factory.tmpfile # Force a rebuild self.cts_cib = None self.Factory.tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" self.contents(target) self.Factory.rsh(self.Factory.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.Factory.tmpfile) self.Factory.tmpfile = old def contents(self, target=None): # fencing resource if self.cts_cib: return self.cts_cib if target: self.Factory.target = target self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile)) self.num_nodes = len(self.CM.Env["nodes"]) no_quorum = "stop" if self.num_nodes < 3: no_quorum = "ignore" self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes) # We don't need a nodes section unless we add attributes stn = None # Fencing resource # Define first so that the shell doesn't reject every update if self.CM.Env["DoFencing"]: # Define the "real" fencing device st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith") # Set a threshold for unreliable stonith devices such as the vmware one st.add_meta("migration-threshold", "5") st.add_op("monitor", "120s", timeout="120s") st.add_op("stop", "0", timeout="60s") st.add_op("start", "0", timeout="60s") # For remote node tests, a cluster node is stopped and brought back up # as a remote node with the name "remote-OLDNAME". To allow fencing # devices to fence these nodes, create a list of all possible node names. 
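            # For example (hypothetical node names): if self.CM.Env["nodes"]
            # is ["node1", "node2"], the comprehension below produces
            # ["node1", "remote-node1", "node2", "remote-node2"].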
all_node_names = [ prefix+n for n in self.CM.Env["nodes"] for prefix in ('', 'remote-') ] # Add all parameters specified by user entries = self.CM.Env["stonith-params"].split(',') for entry in entries: try: (name, value) = entry.split('=', 1) except ValueError: print("Warning: skipping invalid fencing parameter: %s" % entry) continue # Allow user to specify "all" as the node list, and expand it here if name in [ "hostlist", "pcmk_host_list" ] and value == "all": value = ' '.join(all_node_names) st[name] = value st.commit() # Test advanced fencing logic if True: stf_nodes = [] stt_nodes = [] attr_nodes = {} # Create the levels stl = FencingTopology(self.Factory) for node in self.CM.Env["nodes"]: # Remote node tests will rename the node remote_node = "remote-" + node # Randomly assign node to a fencing method ftype = self.CM.Env.RandomGen.choice(["levels-and", "levels-or ", "broadcast "]) # For levels-and, randomly choose targeting by node name or attribute by = "" if ftype == "levels-and": node_id = self.get_node_id(node) if node_id == 0 or self.CM.Env.RandomGen.choice([True, False]): by = " (by name)" else: attr_nodes[node] = node_id by = " (by attribute)" self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by)) if ftype == "levels-and": # If targeting by name, add a topology level for this node if node not in attr_nodes: stl.level(1, node, "FencingPass,Fencing") # Always target remote nodes by name, otherwise we would need to add # an attribute to the remote node only during remote tests (we don't # want nonexistent remote nodes showing up in the non-remote tests). # That complexity is not worth the effort. stl.level(1, remote_node, "FencingPass,Fencing") # Add the node (and its remote equivalent) to the list of levels-and nodes. stt_nodes.extend([node, remote_node]) elif ftype == "levels-or ": for n in [ node, remote_node ]: stl.level(1, n, "FencingFail") stl.level(2, n, "Fencing") stf_nodes.extend([node, remote_node]) # If any levels-and nodes were targeted by attribute, # create the attributes and a level for the attribute. if attr_nodes: stn = Nodes(self.Factory) for (node_name, node_id) in list(attr_nodes.items()): stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" }) stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and") # Create a Dummy agent that always passes for levels-and if len(stt_nodes): stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith") stt["pcmk_host_list"] = " ".join(stt_nodes) # Wait this many seconds before doing anything, handy for letting disks get flushed too stt["random_sleep_range"] = "30" stt["mode"] = "pass" stt.commit() # Create a Dummy agent that always fails for levels-or if len(stf_nodes): stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith") stf["pcmk_host_list"] = " ".join(stf_nodes) # Wait this many seconds before doing anything, handy for letting disks get flushed too stf["random_sleep_range"] = "30" stf["mode"] = "fail" stf.commit() # Now commit the levels themselves stl.commit() o = Option(self.Factory) o["stonith-enabled"] = self.CM.Env["DoFencing"] o["start-failure-is-fatal"] = "false" o["pe-input-series-max"] = "5000" o["shutdown-escalation"] = "5min" o["batch-limit"] = "10" o["dc-deadtime"] = "5s" o["no-quorum-policy"] = no_quorum if self.CM.Env["DoBSC"] == 1: o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!" 
o.commit() o = OpDefaults(self.Factory) o["timeout"] = "90s" o.commit() # Commit the nodes section if we defined one if stn is not None: stn.commit() # Add an alerts section if possible if self.Factory.rsh.exists_on_all(self.CM.Env["notification-agent"], self.CM.Env["nodes"]): alerts = Alerts(self.Factory) alerts.add_alert(self.CM.Env["notification-agent"], self.CM.Env["notification-recipient"]) alerts.commit() # Add resources? if self.CM.Env["CIBResource"] == 1: self.add_resources() if self.CM.cluster_monitor == 1: mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker") mon.add_op("start", "0", requires="nothing") mon.add_op("monitor", "5s", requires="nothing") mon["update"] = "10" mon["extra_options"] = "-r -n" mon["user"] = "abeekhof" mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html" mon.commit() #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''') # generate cib self.cts_cib = self._show() if self.Factory.tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml": self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile) return self.cts_cib def add_resources(self): # Per-node resources for node in self.CM.Env["nodes"]: name = "rsc_"+node r = self.NewIP(name) r.prefer(node, "100") r.commit() # Migrator # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach m = Resource(self.Factory, "migrator","Dummy", "ocf", "pacemaker") m["passwd"] = "whatever" m.add_meta("resource-stickiness","1") m.add_meta("allow-migrate", "1") m.add_op("monitor", "P10S") m.commit() # Ping the test exerciser p = Resource(self.Factory, "ping-1","ping", "ocf", "pacemaker") p.add_op("monitor", "60s") p["host_list"] = self.CM.Env["cts-exerciser"] p["name"] = "connected" p["debug"] = "true" c = Clone(self.Factory, "Connectivity", p) c["globally-unique"] = "false" c.commit() # promotable clone resource s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker") s.add_op("monitor", "15s", timeout="60s") s.add_op("monitor", "16s", timeout="60s", role="Master") ms = Clone(self.Factory, "promotable-1", s) ms["promotable"] = "true" ms["clone-max"] = self.num_nodes ms["clone-node-max"] = 1 ms["promoted-max"] = 1 ms["promoted-node-max"] = 1 # Require connectivity to run the promotable clone r = Rule(self.Factory, "connected", "-INFINITY", op="or") r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1")) r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None)) ms.prefer("connected", rule=r) ms.commit() # Group Resource g = Group(self.Factory, "group-1") g.add_child(self.NewIP()) if self.CM.Env["have_systemd"]: sysd = Resource(self.Factory, "petulant", "pacemaker-cts-dummyd@10", "service") sysd.add_op("monitor", "P10S") g.add_child(sysd) else: g.add_child(self.NewIP()) g.add_child(self.NewIP()) # Make group depend on the promotable clone g.after("promotable-1", first="promote", then="start") g.colocate("promotable-1", "INFINITY", withrole="Master") g.commit() # LSB resource lsb = Resource(self.Factory, "lsb-dummy", "LSBDummy", "lsb") lsb.add_op("monitor", "5s") # LSB with group lsb.after("group-1") lsb.colocate("group-1") lsb.commit() class CIB20(CIB12): version = "pacemaker-2.5" class CIB30(CIB12): version = "pacemaker-3.0" #class HASI(CIB10): # def add_resources(self): # # DLM resource # self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''') # self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''') 
# O2CB resource # self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''') # self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''') # self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''') # self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''') class ConfigFactory(object): def __init__(self, CM): self.CM = CM self.rsh = self.CM.rsh self.register("pacemaker12", CIB12, CM, self) self.register("pacemaker20", CIB20, CM, self) self.register("pacemaker30", CIB30, CM, self) # self.register("hae", HASI, CM, self) if self.CM.Env["ListTests"] == 0: self.target = self.CM.Env["nodes"][0] self.tmpfile = None def log(self, args): self.CM.log("cib: %s" % args) def debug(self, args): self.CM.debug("cib: %s" % args) def register(self, methodName, constructor, *args, **kargs): """register a constructor""" _args = [constructor] _args.extend(args) setattr(self, methodName, ConfigFactoryItem(*_args, **kargs)) def unregister(self, methodName): """unregister a constructor""" delattr(self, methodName) def createConfig(self, name="pacemaker-1.0"): if name == "pacemaker-1.0": name = "pacemaker10"; elif name == "pacemaker-1.2": name = "pacemaker12"; elif name == "pacemaker-2.0": name = "pacemaker20"; elif name == "pacemaker-3.0": name = "pacemaker30"; elif name == "hasi": name = "hae"; if hasattr(self, name): return getattr(self, name)() else: self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name) return self.pacemaker30() class ConfigFactoryItem(object): def __init__(self, function, *args, **kargs): self._function = function self._args = args self._kargs = kargs def __call__(self, *args, **kargs): """call function""" _args = list(self._args) _args.extend(args) _kargs = self._kargs.copy() _kargs.update(kargs) return self._function(*_args,**_kargs) if __name__ == '__main__': """ Unit test (pass cluster node names as command line arguments) """ import cts.CTS import cts.CM_corosync import sys if len(sys.argv) < 2: print("Usage: %s ..." % sys.argv[0]) sys.exit(1) args = [ "--nodes", " ".join(sys.argv[1:]), "--clobber-cib", "--populate-resources", "--stack", "corosync", "--test-ip-base", "fe80::1234:56:7890:1000", "--stonith", "rhcs", ] env = CTS.CtsLab(args) cm = CM_corosync.crm_corosync(env) CibFactory = ConfigFactory(cm) cib = CibFactory.createConfig("pacemaker-2.0") print(cib.contents()) diff --git a/cts/CM_common.py b/cts/CM_common.py index b7ff223e90..1cb1249977 100755 --- a/cts/CM_common.py +++ b/cts/CM_common.py @@ -1,471 +1,468 @@ """ Cluster Manager common class for Pacemaker's Cluster Test Suite (CTS) This was originally the cluster manager class for the Heartbeat stack. It is retained for use as a base class by other cluster manager classes. It could be merged into the ClusterManager class directly, but this is easier. """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - __copyright__ = """Original Author: Huang Zhen Copyright 2004 International Business Machines -with later changes copyright 2004-2019 the Pacemaker project contributors. +with later changes copyright 2004-2020 the Pacemaker project contributors. The version control history for this file may have further details. 
""" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys from cts.CTSvars import * from cts.CTS import * from cts.CIB import * from cts.CTStests import AuditResource from cts.watcher import LogWatcher class crm_common(ClusterManager): def __init__(self, Environment, randseed=None, name=None): ClusterManager.__init__(self, Environment, randseed=randseed) self.fastfail = 0 self.cib_installed = 0 self.config = None self.cluster_monitor = 0 self.use_short_names = 1 if self.Env["DoBSC"]: del self.templates["Pat:They_stopped"] self._finalConditions() self.check_transitions = 0 self.check_elections = 0 self.CIBsync = {} self.CibFactory = ConfigFactory(self) self.cib = self.CibFactory.createConfig(self.Env["Schema"]) def errorstoignore(self): # At some point implement a more elegant solution that # also produces a report at the end """ Return a list of known error messages that should be ignored """ return PatternSelector().get_patterns(self.name, "BadNewsIgnore") def install_config(self, node): if not self.ns.WaitForNodeToComeUp(node): self.log("Node %s is not up." % node) return None if not node in self.CIBsync and self.Env["ClobberCIB"] == 1: self.CIBsync[node] = 1 self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") # Only install the CIB on the first node, all the other ones will pick it up from there if self.cib_installed == 1: return None self.cib_installed = 1 if self.Env["CIBfilename"] == None: self.log("Installing Generated CIB on node %s" % (node)) self.cib.install(node) else: self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node)) if 0 != self.rsh.cp(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)): raise ValueError("Can not scp file to %s %d"%(node)) self.rsh(node, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml") def prepare(self): '''Finish the Initialization process. 
Prepare to test...''' self.partitions_expected = 1 for node in self.Env["nodes"]: self.ShouldBeStatus[node] = "" if self.Env["experimental-tests"]: self.unisolate_node(node) self.StataCM(node) def test_node_CM(self, node): '''Report the status of the cluster manager on a given node''' watchpats = [ ] watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)") watchpats.append(self.templates["Pat:NonDC_started"] % node) watchpats.append(self.templates["Pat:DC_started"] % node) idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterIdle", hosts=[node], kind=self.Env["LogWatcher"]) idle_watch.setwatch() out = self.rsh(node, self.templates["StatusCmd"]%node, 1) self.debug("Node %s status: '%s'" %(node, out)) if not out or (out.find('ok') < 0): if self.ShouldBeStatus[node] == "up": self.log( "Node status for %s is %s but we think it should be %s" % (node, "down", self.ShouldBeStatus[node])) self.ShouldBeStatus[node] = "down" return 0 if self.ShouldBeStatus[node] == "down": self.log( "Node status for %s is %s but we think it should be %s: %s" % (node, "up", self.ShouldBeStatus[node], out)) self.ShouldBeStatus[node] = "up" # check the output first - because syslog-ng loses messages if out.find('S_NOT_DC') != -1: # Up and stable return 2 if out.find('S_IDLE') != -1: # Up and stable return 2 # fall back to syslog-ng and wait if not idle_watch.look(): # just up self.debug("Warn: Node %s is unstable: %s" % (node, out)) return 1 # Up and stable return 2 # Is the node up or is the node down def StataCM(self, node): '''Report the status of the cluster manager on a given node''' if self.test_node_CM(node) > 0: return 1 return None # Being up and being stable is not the same question... def node_stable(self, node): '''Report the status of the cluster manager on a given node''' if self.test_node_CM(node) == 2: return 1 self.log("Warn: Node %s not stable" % (node)) return None def partition_stable(self, nodes, timeout=None): watchpats = [ ] watchpats.append("Current ping state: S_IDLE") watchpats.append(self.templates["Pat:DC_IDLE"]) self.debug("Waiting for cluster stability...") if timeout == None: timeout = self.Env["DeadTime"] if len(nodes) < 3: self.debug("Cluster is inactive") return 1 idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterStable", timeout, hosts=nodes.split(), kind=self.Env["LogWatcher"]) idle_watch.setwatch() for node in nodes.split(): # have each node dump its current state self.rsh(node, self.templates["StatusCmd"] % node, 1) ret = idle_watch.look() while ret: self.debug(ret) for node in nodes.split(): if re.search(node, ret): return 1 ret = idle_watch.look() self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout)) return None def cluster_stable(self, timeout=None, double_check=False): partitions = self.find_partitions() for partition in partitions: if not self.partition_stable(partition, timeout): return None if double_check: # Make sure we are really stable and that all resources, # including those that depend on transient node attributes, # are started if they were going to be time.sleep(5) for partition in partitions: if not self.partition_stable(partition, timeout): return None return 1 def is_node_dc(self, node, status_line=None): rc = 0 if not status_line: status_line = self.rsh(node, self.templates["StatusCmd"]%node, 1) if not status_line: rc = 0 elif status_line.find('S_IDLE') != -1: rc = 1 elif status_line.find('S_INTEGRATION') != -1: rc = 1 elif status_line.find('S_FINALIZE_JOIN') != -1: rc = 1 elif 
status_line.find('S_POLICY_ENGINE') != -1: rc = 1 elif status_line.find('S_TRANSITION_ENGINE') != -1: rc = 1 return rc def active_resources(self, node): (rc, output) = self.rsh(node, """crm_resource -c""", None) resources = [] for line in output: if re.search("^Resource", line): tmp = AuditResource(self, line) if tmp.type == "primitive" and tmp.host == node: resources.append(tmp.id) return resources def ResourceLocation(self, rid): ResourceNodes = [] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": cmd = self.templates["RscRunning"] % (rid) (rc, lines) = self.rsh(node, cmd, None) if rc == 127: self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd) for line in lines: self.log("Output: "+line) elif rc == 0: ResourceNodes.append(node) return ResourceNodes def find_partitions(self): ccm_partitions = [] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": partition = self.rsh(node, self.templates["PartitionCmd"], 1) if not partition: self.log("no partition details for %s" % node) elif len(partition) > 2: nodes = partition.split() nodes.sort() partition = ' '.join(nodes) found = 0 for a_partition in ccm_partitions: if partition == a_partition: found = 1 if found == 0: self.debug("Adding partition from %s: %s" % (node, partition)) ccm_partitions.append(partition) else: self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node)) else: self.log("bad partition details for %s" % node) else: self.debug("Node %s is down... skipping" % node) self.debug("Found partitions: %s" % repr(ccm_partitions) ) return ccm_partitions def HasQuorum(self, node_list): # If we are auditing a partition, then one side will # have quorum and the other not. # So the caller needs to tell us which we are checking # If no value for node_list is specified... assume all nodes if not node_list: node_list = self.Env["nodes"] for node in node_list: if self.ShouldBeStatus[node] == "up": quorum = self.rsh(node, self.templates["QuorumCmd"], 1) if quorum.find("1") != -1: return 1 elif quorum.find("0") != -1: return 0 else: self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum) return 0 def Components(self): complist = [] common_ignore = [ "Pending action:", "(ERROR|error): crm_log_message_adv:", "(ERROR|error): MSG: No message to dump", "pending LRM operations at shutdown", "Lost connection to the CIB manager", "Connection to the CIB terminated...", "Sending message to the CIB manager FAILED", "Action A_RECOVER .* not supported", "(ERROR|error): stonithd_op_result_ready: not signed on", "pingd.*(ERROR|error): send_update: Could not send update", "send_ipc_message: IPC Channel to .* is not connected", "unconfirmed_actions: Waiting on .* unconfirmed actions", "cib_native_msgready: Message pending on command channel", r": Performing A_EXIT_1 - forcefully exiting ", r"Resource .* was active at shutdown. 
You may ignore this error if it is unmanaged.", ] stonith_ignore = [ r"Updating failcount for child_DoFencing", r"error.*: Fencer connection failed \(will retry\)", "pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.", ] stonith_ignore.extend(common_ignore) ccm = Process(self, "ccm", triggersreboot=self.fastfail, pats = [ "State transition .* S_RECOVERY", "pacemaker-controld.*Action A_RECOVER .* not supported", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*: Could not recover from internal error", "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy", # these status numbers are likely wrong now r"pacemaker-controld.*exited with status 2", r"attrd.*exited with status 1", r"cib.*exited with status 2", # Not if it was fenced # "A new node joined the cluster", # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # "Executing .* fencing operation", # "tengine_stonith_callback: .*result=0", # "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE", # "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN", "State transition S_STARTING -> S_PENDING", ], badnews_ignore = common_ignore) based = Process(self, "pacemaker-based", triggersreboot=self.fastfail, pats = [ "State transition .* S_RECOVERY", "Lost connection to the CIB manager", "Connection to the CIB manager terminated", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy", r"pacemaker-controld.*: Could not recover from internal error", # these status numbers are likely wrong now r"pacemaker-controld.*exited with status 2", r"attrd.*exited with status 1", ], badnews_ignore = common_ignore) execd = Process(self, "pacemaker-execd", triggersreboot=self.fastfail, pats = [ "State transition .* S_RECOVERY", "LRM Connection failed", "pacemaker-controld.*I_ERROR.*lrm_connection_destroy", "State transition S_STARTING -> S_PENDING", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*: Could not recover from internal error", # this status number is likely wrong now r"pacemaker-controld.*exited with status 2", ], badnews_ignore = common_ignore) controld = Process(self, "pacemaker-controld", triggersreboot=self.fastfail, pats = [ # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # "Executing .* fencing operation", # "tengine_stonith_callback: .*result=0", "State transition .* S_IDLE", "State transition S_STARTING -> S_PENDING", ], badnews_ignore = common_ignore) schedulerd = Process(self, "pacemaker-schedulerd", triggersreboot=self.fastfail, pats = [ "State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*: Could not recover from internal error", r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed", "pacemaker-controld.*I_ERROR.*save_cib_contents", # this status number is likely wrong now r"pacemaker-controld.*exited with status 2", ], badnews_ignore = common_ignore, dc_only=1) if self.Env["DoFencing"] == 1 : complist.append(Process(self, "stoniths", triggersreboot=self.fastfail, dc_pats = [ r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed", "Attempting connection to fencing daemon", ], badnews_ignore = stonith_ignore)) if self.fastfail == 0: ccm.pats.extend([ # these status numbers are likely wrong now r"attrd.*exited with status 1", r"pacemaker-(based|controld).*exited with status 2", ]) based.pats.extend([ # these status numbers are 
likely wrong now r"attrd.*exited with status 1", r"pacemaker-controld.*exited with status 2", ]) execd.pats.extend([ # these status numbers are likely wrong now r"pacemaker-controld.*exited with status 2", ]) complist.append(ccm) complist.append(based) complist.append(execd) complist.append(controld) complist.append(schedulerd) return complist def StandbyStatus(self, node): out=self.rsh(node, self.templates["StandbyQueryCmd"] % node, 1) if not out: return "off" out = out[:-1] self.debug("Standby result: "+out) return out # status == "on" : Enter Standby mode # status == "off": Enter Active mode def SetStandbyMode(self, node, status): current_status = self.StandbyStatus(node) cmd = self.templates["StandbyCmd"] % (node, status) ret = self.rsh(node, cmd) return True def AddDummyRsc(self, node, rid): rsc_xml = """ ' '""" % (rid, rid) constraint_xml = """ ' ' """ % (rid, node, node, rid) self.rsh(node, self.templates['CibAddXml'] % (rsc_xml)) self.rsh(node, self.templates['CibAddXml'] % (constraint_xml)) def RemoveDummyRsc(self, node, rid): constraint = "\"//rsc_location[@rsc='%s']\"" % (rid) rsc = "\"//primitive[@id='%s']\"" % (rid) self.rsh(node, self.templates['CibDelXpath'] % constraint) self.rsh(node, self.templates['CibDelXpath'] % rsc) ####################################################################### # # A little test code... # # Which you are advised to completely ignore... # ####################################################################### if __name__ == '__main__': pass diff --git a/cts/CM_corosync.py b/cts/CM_corosync.py index 720249e309..eb1028169b 100644 --- a/cts/CM_corosync.py +++ b/cts/CM_corosync.py @@ -1,63 +1,60 @@ """ Corosync-specific class for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2007-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2007-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" from cts.CTSvars import * from cts.CM_common import crm_common from cts.CTS import Process from cts.patterns import PatternSelector class crm_corosync(crm_common): ''' Corosync version 2 cluster manager class ''' def __init__(self, Environment, randseed=None, name=None): if not name: name="crm-corosync" crm_common.__init__(self, Environment, randseed=randseed, name=name) self.fullcomplist = {} self.templates = PatternSelector(self.name) def Components(self): complist = [] if not len(list(self.fullcomplist.keys())): for c in [ "pacemaker-based", "pacemaker-controld", "pacemaker-attrd", "pacemaker-execd", "pacemaker-fenced" ]: self.fullcomplist[c] = Process( self, c, pats = self.templates.get_component(self.name, c), badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c), common_ignore = self.templates.get_component(self.name, "common-ignore")) # the scheduler uses dc_pats instead of pats self.fullcomplist["pacemaker-schedulerd"] = Process( self, "pacemaker-schedulerd", dc_pats = self.templates.get_component(self.name, "pacemaker-schedulerd"), badnews_ignore = self.templates.get_component(self.name, "pacemaker-schedulerd-ignore"), common_ignore = self.templates.get_component(self.name, "common-ignore")) # add (or replace) extra components self.fullcomplist["corosync"] = Process( self, "corosync", pats = self.templates.get_component(self.name, "corosync"), badnews_ignore = 
self.templates.get_component(self.name, "corosync-ignore"), common_ignore = self.templates.get_component(self.name, "common-ignore") ) # Processes running under valgrind can't be shot with "killall -9 processname", # so don't include them in the returned list vgrind = self.Env["valgrind-procs"].split() for key in list(self.fullcomplist.keys()): if self.Env["valgrind-tests"]: if key in vgrind: self.log("Filtering %s from the component list as it is being profiled by valgrind" % key) continue if key == "pacemaker-fenced" and not self.Env["DoFencing"]: continue complist.append(self.fullcomplist[key]) return complist diff --git a/cts/CTS.py.in b/cts/CTS.py.in index 091bb1f8c1..f239edd764 100644 --- a/cts/CTS.py.in +++ b/cts/CTS.py.in @@ -1,939 +1,933 @@ """ Main classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2000-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2000-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import os import re import sys import time import traceback -if sys.version_info > (3,): - from collections import UserDict -else: - from UserDict import UserDict +from collections import UserDict from cts.CTSvars import * from cts.logging import LogFactory from cts.watcher import LogWatcher from cts.remote import RemoteFactory, input_wrapper from cts.environment import EnvFactory from cts.patterns import PatternSelector has_log_stats = {} log_stats_bin = CTSvars.CRM_DAEMON_DIR + "/cts_log_stats.sh" log_stats = """ #!@BASH_PATH@ # Tool for generating system load reports while CTS runs trap "" 1 f=$1; shift action=$1; shift base=`basename $0` if [ ! -e $f ]; then echo "Time, Load 1, Load 5, Load 15, Test Marker" > $f fi function killpid() { if [ -e $f.pid ]; then kill -9 `cat $f.pid` rm -f $f.pid fi } function status() { if [ -e $f.pid ]; then kill -0 `cat $f.pid` return $? else return 1 fi } function start() { # Is it already running? if status then return fi echo Active as $$ echo $$ > $f.pid while [ 1 = 1 ]; do uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f #top -b -c -n1 | grep -e usr/libexec/pacemaker | grep -v -e grep -e python | head -n 1 | sed s@/usr/libexec/pacemaker/@@ | awk '{print " 0, "$9", "$10", "$12}' | tr '\\n' ',' >> $f echo 0 >> $f sleep 5 done } case $action in start) start ;; start-bg|bg) # Use c --ssh -- ./stats.sh file start-bg nohup $0 $f start >/dev/null 2>&1 > $f echo " $*" >> $f start ;; *) echo "Unknown action: $action." ;; esac """ class CtsLab(object): '''This class defines the Lab Environment for the Cluster Test System. It defines those things which are expected to change from test environment to test environment for the same cluster manager. It is where you define the set of nodes that are in your test lab what kind of reset mechanism you use, etc. This class is derived from a UserDict because we hold many different parameters of different kinds, and this provides a uniform and extensible interface useful for any kind of communication between the user/administrator/tester and CTS. At this point in time, it is the intent of this class to model static configuration and/or environmental data about the environment which doesn't change as the tests proceed. Well-known names (keys) are an important concept in this class. 
The HasMinimalKeys member function knows the minimal set of well-known names for the class. The following names are standard (well-known) at this time: nodes An array of the nodes in the cluster reset A ResetMechanism object logger An array of objects that log strings... CMclass The type of ClusterManager we are running (This is a class object, not a class instance) RandSeed Random seed. It is a triple of bytes. (optional) The CTS code ignores names it doesn't know about/need. The individual tests have access to this information, and it is perfectly acceptable to provide hints, tweaks, fine-tuning directions or other information to the tests through this mechanism. ''' def __init__(self, args=None): self.Env = EnvFactory().getInstance(args) self.Scenario = None self.logger = LogFactory() self.rsh = RemoteFactory().getInstance() def dump(self): self.Env.dump() def has_key(self, key): return key in list(self.Env.keys()) def __getitem__(self, key): return self.Env[key] def __setitem__(self, key, value): self.Env[key] = value def run(self, Scenario, Iterations): if not Scenario: self.logger.log("No scenario was defined") return 1 self.logger.log("Cluster nodes: ") for node in self.Env["nodes"]: self.logger.log(" * %s" % (node)) if not Scenario.SetUp(): return 1 try : Scenario.run(Iterations) except : self.logger.log("Exception by %s" % sys.exc_info()[0]) self.logger.traceback(traceback) Scenario.summarize() Scenario.TearDown() return 1 #ClusterManager.oprofileSave(Iterations) Scenario.TearDown() Scenario.summarize() if Scenario.Stats["failure"] > 0: return Scenario.Stats["failure"] elif Scenario.Stats["success"] != Iterations: self.logger.log("No failure count but success != requested iterations") return 1 return 0 def __CheckNode(self, node): "Raise a ValueError if the given node isn't valid" if not self.IsValidNode(node): raise ValueError("Invalid node [%s] in CheckNode" % node) class NodeStatus(object): def __init__(self, env): self.Env = env def IsNodeBooted(self, node): '''Return TRUE if the given node is booted (responds to pings)''' if self.Env["docker"]: return RemoteFactory().getInstance()("localhost", "docker inspect --format {{.State.Running}} %s | grep -q true" % node, silent=True) == 0 return RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, silent=True) == 0 def IsSshdUp(self, node): rc = RemoteFactory().getInstance()(node, "true", silent=True) return rc == 0 def WaitForNodeToComeUp(self, node, Timeout=300): '''Return TRUE when given node comes up, or None/FALSE if timeout''' timeout = Timeout anytimeouts = 0 while timeout > 0: if self.IsNodeBooted(node) and self.IsSshdUp(node): if anytimeouts: # Fudge to wait for the system to finish coming up time.sleep(30) LogFactory().debug("Node %s now up" % node) return 1 time.sleep(30) if (not anytimeouts): LogFactory().debug("Waiting for node %s to come up" % node) anytimeouts = 1 timeout = timeout - 1 LogFactory().log("%s did not come up within %d tries" % (node, Timeout)) if self.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Continue? [nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("%s did not come up within %d tries" % (node, Timeout)) def WaitForAllNodesToComeUp(self, nodes, timeout=300): '''Return TRUE when all nodes come up, or FALSE if timeout''' for node in nodes: if not self.WaitForNodeToComeUp(node, timeout): return None return 1 class ClusterManager(UserDict): '''The Cluster Manager class. This is an subclass of the Python dictionary class. 
(this is because it contains lots of {name,value} pairs, not because it's behavior is that terribly similar to a dictionary in other ways.) This is an abstract class which class implements high-level operations on the cluster and/or its cluster managers. Actual cluster managers classes are subclassed from this type. One of the things we do is track the state we think every node should be in. ''' def __InitialConditions(self): #if os.geteuid() != 0: # raise ValueError("Must Be Root!") None def _finalConditions(self): for key in list(self.keys()): if self[key] == None: raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.") def __init__(self, Environment, randseed=None): self.Env = EnvFactory().getInstance() self.templates = PatternSelector(self.Env["Name"]) self.__InitialConditions() self.logger = LogFactory() self.TestLoggingLevel=0 self.data = {} self.name = self.Env["Name"] self.rsh = RemoteFactory().getInstance() self.ShouldBeStatus={} self.ns = NodeStatus(self.Env) self.OurNode = os.uname()[1].lower() self.__instance_errorstoignore = [] def __getitem__(self, key): if key == "Name": return self.name print("FIXME: Getting %s from %s" % (key, repr(self))) if key in self.data: return self.data[key] return self.templates.get_patterns(self.Env["Name"], key) def __setitem__(self, key, value): print("FIXME: Setting %s=%s on %s" % (key, value, repr(self))) self.data[key] = value def key_for_node(self, node): return node def instance_errorstoignore_clear(self): '''Allows the test scenario to reset instance errors to ignore on each iteration.''' self.__instance_errorstoignore = [] def instance_errorstoignore(self): '''Return list of errors which are 'normal' for a specific test instance''' return self.__instance_errorstoignore def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] def log(self, args): self.logger.log(args) def debug(self, args): self.logger.debug(args) def prepare(self): '''Finish the Initialization process. 
Prepare to test...''' print(repr(self)+"prepare") for node in self.Env["nodes"]: if self.StataCM(node): self.ShouldBeStatus[node] = "up" else: self.ShouldBeStatus[node] = "down" if self.Env["experimental-tests"]: self.unisolate_node(node) def upcount(self): '''How many nodes are up?''' count = 0 for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": count = count + 1 return count def install_support(self, command="install"): for node in self.Env["nodes"]: self.rsh(node, CTSvars.CRM_DAEMON_DIR + "/cts-support " + command) def install_config(self, node): return None def prepare_fencing_watcher(self, name): # If we don't have quorum now but get it as a result of starting this node, # then a bunch of nodes might get fenced upnode = None if self.HasQuorum(None): self.debug("Have quorum") return None if not self.templates["Pat:Fencing_start"]: print("No start pattern") return None if not self.templates["Pat:Fencing_ok"]: print("No ok pattern") return None stonith = None stonithPats = [] for peer in self.Env["nodes"]: if self.ShouldBeStatus[peer] != "up": stonithPats.append(self.templates["Pat:Fencing_ok"] % peer) stonithPats.append(self.templates["Pat:Fencing_start"] % peer) stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"]) stonith.setwatch() return stonith def fencing_cleanup(self, node, stonith): peer_list = [] peer_state = {} self.debug("Looking for nodes that were fenced as a result of %s starting" % node) # If we just started a node, we may now have quorum (and permission to fence) if not stonith: self.debug("Nothing to do") return peer_list q = self.HasQuorum(None) if not q and len(self.Env["nodes"]) > 2: # We didn't gain quorum - we shouldn't have shot anyone self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"]))) return peer_list for n in self.Env["nodes"]: peer_state[n] = "unknown" # Now see if any states need to be updated self.debug("looking for: " + repr(stonith.regexes)) shot = stonith.look(0) while shot: line = repr(shot) self.debug("Found: " + line) del stonith.regexes[stonith.whichmatch] # Extract node name for n in self.Env["nodes"]: if re.search(self.templates["Pat:Fencing_ok"] % n, shot): peer = n peer_state[peer] = "complete" self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer) elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot): # TODO: Correctly detect multiple fencing operations for the same host peer = n peer_state[peer] = "in-progress" self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer) if not peer: self.logger.log("ERROR: Unknown stonith match: %s" % line) elif not peer in peer_list: self.debug("Found peer: " + peer) peer_list.append(peer) # Get the next one shot = stonith.look(60) for peer in peer_list: self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer])) if self.Env["at-boot"]: self.ShouldBeStatus[peer] = "up" else: self.ShouldBeStatus[peer] = "down" if peer_state[peer] == "in-progress": # Wait for any in-progress operations to complete shot = stonith.look(60) while len(stonith.regexes) and shot: line = repr(shot) self.debug("Found: " + line) del stonith.regexes[stonith.whichmatch] shot = stonith.look(60) # Now make sure the node is alive too self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"]) # Poll until it comes up if self.Env["at-boot"]: if not self.StataCM(peer): time.sleep(self.Env["StartTime"]) if not 
self.StataCM(peer): self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer) return None return peer_list def StartaCM(self, node, verbose=False): '''Start up the cluster manager on a given node''' if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node)) else: self.debug("Starting %s on node %s" % (self.templates["Name"], node)) ret = 1 if not node in self.ShouldBeStatus: self.ShouldBeStatus[node] = "down" if self.ShouldBeStatus[node] != "down": return 1 patterns = [] # Technically we should always be able to notice ourselves starting patterns.append(self.templates["Pat:Local_started"] % node) if self.upcount() == 0: patterns.append(self.templates["Pat:DC_started"] % node) else: patterns.append(self.templates["Pat:NonDC_started"] % node) watch = LogWatcher( self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"]) self.install_config(node) self.ShouldBeStatus[node] = "any" if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]): self.logger.log ("%s was already started" % (node)) return 1 stonith = self.prepare_fencing_watcher(node) watch.setwatch() if self.rsh(node, self.templates["StartCmd"]) != 0: self.logger.log ("Warn: Start command failed on node %s" % (node)) self.fencing_cleanup(node, stonith) return None self.ShouldBeStatus[node] = "up" watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Startup pattern not found: %s" % (regex)) if watch_result and self.cluster_stable(self.Env["DeadTime"]): #self.debug("Found match: "+ repr(watch_result)) self.fencing_cleanup(node, stonith) return 1 elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]): self.fencing_cleanup(node, stonith) return 1 self.logger.log ("Warn: Start failed for node %s" % (node)) return None def StartaCMnoBlock(self, node, verbose=False): '''Start up the cluster manager on a given node with none-block mode''' if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node)) else: self.debug("Starting %s on node %s" % (self["Name"], node)) self.install_config(node) self.rsh(node, self.templates["StartCmd"], synchronous=0) self.ShouldBeStatus[node] = "up" return 1 def StopaCM(self, node, verbose=False, force=False): '''Stop the cluster manager on a given node''' if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node)) else: self.debug("Stopping %s on node %s" % (self["Name"], node)) if self.ShouldBeStatus[node] != "up" and force == False: return 1 if self.rsh(node, self.templates["StopCmd"]) == 0: # Make sure we can continue even if corosync leaks # fdata-* is the old name #self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*") self.ShouldBeStatus[node] = "down" self.cluster_stable(self.Env["DeadTime"]) return 1 else: self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node)) return None def StopaCMnoBlock(self, node): '''Stop the cluster manager on a given node with none-block mode''' self.debug("Stopping %s on node %s" % (self["Name"], node)) self.rsh(node, self.templates["StopCmd"], synchronous=0) self.ShouldBeStatus[node] = "down" return 1 def cluster_stable(self, timeout = None): time.sleep(self.Env["StableTime"]) return 1 def node_stable(self, node): return 1 def RereadCM(self, node): '''Force the cluster manager on a given node to reread its config This may be a no-op on certain cluster managers. 
''' rc=self.rsh(node, self.templates["RereadCmd"]) if rc == 0: return 1 else: self.logger.log ("Could not force %s on node %s to reread its config" % (self["Name"], node)) return None def StataCM(self, node): '''Report the status of the cluster manager on a given node''' out=self.rsh(node, self.templates["StatusCmd"] % node, 1) ret= (str.find(out, 'stopped') == -1) try: if ret: if self.ShouldBeStatus[node] == "down": self.logger.log( "Node status for %s is %s but we think it should be %s" % (node, "up", self.ShouldBeStatus[node])) else: if self.ShouldBeStatus[node] == "up": self.logger.log( "Node status for %s is %s but we think it should be %s" % (node, "down", self.ShouldBeStatus[node])) except KeyError: pass if ret: self.ShouldBeStatus[node] = "up" else: self.ShouldBeStatus[node] = "down" return ret def startall(self, nodelist=None, verbose=False, quick=False): '''Start the cluster manager on every node in the cluster. We can do it on a subset of the cluster if nodelist is not None. ''' map = {} if not nodelist: nodelist = self.Env["nodes"] for node in nodelist: if self.ShouldBeStatus[node] == "down": self.ns.WaitForAllNodesToComeUp(nodelist, 300) if not quick: # This is used for "basic sanity checks", so only start one node ... if not self.StartaCM(node, verbose=verbose): return 0 return 1 # Approximation of SimulStartList for --boot watchpats = [ ] watchpats.append(self.templates["Pat:DC_IDLE"]) for node in nodelist: watchpats.append(self.templates["Pat:Local_started"] % node) watchpats.append(self.templates["Pat:InfraUp"] % node) watchpats.append(self.templates["Pat:PacemakerUp"] % node) # Start all the nodes - at about the same time... watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"]) watch.setwatch() if not self.StartaCM(nodelist[0], verbose=verbose): return 0 for node in nodelist: self.StartaCMnoBlock(node, verbose=verbose) watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Startup pattern not found: %s" % (regex)) if not self.cluster_stable(): self.logger.log("Cluster did not stabilize") return 0 return 1 def stopall(self, nodelist=None, verbose=False, force=False): '''Stop the cluster managers on every node in the cluster. We can do it on a subset of the cluster if nodelist is not None. ''' ret = 1 map = {} if not nodelist: nodelist = self.Env["nodes"] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up" or force == True: if not self.StopaCM(node, verbose=verbose, force=force): ret = 0 return ret def rereadall(self, nodelist=None): '''Force the cluster managers on every node in the cluster to reread their config files. We can do it on a subset of the cluster if nodelist is not None. ''' map = {} if not nodelist: nodelist = self.Env["nodes"] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": self.RereadCM(node) def statall(self, nodelist=None): '''Return the status of the cluster managers in the cluster. We can do it on a subset of the cluster if nodelist is not None. 
''' result = {} if not nodelist: nodelist = self.Env["nodes"] for node in nodelist: if self.StataCM(node): result[node] = "up" else: result[node] = "down" return result def isolate_node(self, target, nodes=None): '''isolate the communication between the nodes''' if not nodes: nodes = self.Env["nodes"] for node in nodes: if node != target: rc = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node)) if rc != 0: self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc)) return None else: self.debug("Communication cut between %s and %s" % (target, node)) return 1 def unisolate_node(self, target, nodes=None): '''fix the communication between the nodes''' if not nodes: nodes = self.Env["nodes"] for node in nodes: if node != target: restored = 0 # Limit the amount of time we have asynchronous connectivity for # Restore both sides as simultaneously as possible self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=0) self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=0) self.debug("Communication restored between %s and %s" % (target, node)) def reducecomm_node(self,node): '''reduce the communication between the nodes''' rc = self.rsh(node, self.templates["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"])) if rc == 0: return 1 else: self.logger.log("Could not reduce the communication between the nodes from node: %s" % node) return None def restorecomm_node(self,node): '''restore the saved communication between the nodes''' rc = 0 if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0 : rc = self.rsh(node, self.templates["RestoreCommCmd"]); if rc == 0: return 1 else: self.logger.log("Could not restore the communication between the nodes from node: %s" % node) return None def HasQuorum(self, node_list): "Return TRUE if the cluster currently has quorum" # If we are auditing a partition, then one side will # have quorum and the other not. # So the caller needs to tell us which we are checking # If no value for node_list is specified... 
assume all nodes raise ValueError("Abstract Class member (HasQuorum)") def Components(self): raise ValueError("Abstract Class member (Components)") def oprofileStart(self, node=None): if not node: for n in self.Env["oprofile"]: self.oprofileStart(n) elif node in self.Env["oprofile"]: self.debug("Enabling oprofile on %s" % node) self.rsh(node, "opcontrol --init") self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all") self.rsh(node, "opcontrol --start") self.rsh(node, "opcontrol --reset") def oprofileSave(self, test, node=None): if not node: for n in self.Env["oprofile"]: self.oprofileSave(test, n) elif node in self.Env["oprofile"]: self.rsh(node, "opcontrol --dump") self.rsh(node, "opcontrol --save=cts.%d" % test) # Read back with: opreport -l session:cts.0 image:/c* if None: self.rsh(node, "opcontrol --reset") else: self.oprofileStop(node) self.oprofileStart(node) def oprofileStop(self, node=None): if not node: for n in self.Env["oprofile"]: self.oprofileStop(n) elif node in self.Env["oprofile"]: self.debug("Stopping oprofile on %s" % node) self.rsh(node, "opcontrol --reset") self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null") def StatsExtract(self): if not self.Env["stats"]: return for host in self.Env["nodes"]: log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR if host in has_log_stats: self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file)) (rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2) self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file)) fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host) print("Extracted stats: %s" % fname) fd = open(fname, "a") fd.writelines(lines) fd.close() def StatsMark(self, testnum): '''Mark the test number in the stats log''' global has_log_stats if not self.Env["stats"]: return for host in self.Env["nodes"]: log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR if not host in has_log_stats: global log_stats global log_stats_bin script=log_stats #script = re.sub("\\\\", "\\\\", script) script = re.sub('\"', '\\\"', script) script = re.sub("'", "\'", script) script = re.sub("`", "\`", script) script = re.sub("\$", "\\\$", script) self.debug("Installing %s on %s" % (log_stats_bin, host)) self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), silent=True) self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file)) has_log_stats[host] = 1 # Now mark it self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=0) class Resource(object): ''' This is an HA resource (not a resource group). A resource group is just an ordered list of Resource objects. ''' def __init__(self, cm, rsctype=None, instance=None): self.CM = cm self.ResourceType = rsctype self.Instance = instance self.needs_quorum = 1 def Type(self): return self.ResourceType def Instance(self, nodename): return self.Instance def IsRunningOn(self, nodename): ''' This member function returns true if our resource is running on the given node in the cluster. It is analagous to the "status" operation on SystemV init scripts and heartbeat scripts. FailSafe calls it the "exclusive" operation. ''' raise ValueError("Abstract Class member (IsRunningOn)") return None def IsWorkingCorrectly(self, nodename): ''' This member function returns true if our resource is operating correctly on the given node in the cluster. OCF does not require this operation, but it might be called the Monitor operation, which is what FailSafe calls it. 
For remotely monitorable resources (like IP addresses), they *should* be monitored remotely for testing. ''' raise ValueError("Abstract Class member (IsWorkingCorrectly)") return None def Start(self, nodename): ''' This member function starts or activates the resource. ''' raise ValueError("Abstract Class member (Start)") return None def Stop(self, nodename): ''' This member function stops or deactivates the resource. ''' raise ValueError("Abstract Class member (Stop)") return None def __repr__(self): if (self.Instance and len(self.Instance) > 1): return "{" + self.ResourceType + "::" + self.Instance + "}" else: return "{" + self.ResourceType + "}" class Component(object): def kill(self, node): None class Process(Component): def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], common_ignore=[], triggersreboot=0): self.name = str(name) self.dc_only = dc_only self.pats = pats self.dc_pats = dc_pats self.CM = cm self.badnews_ignore = badnews_ignore self.badnews_ignore.extend(common_ignore) self.triggersreboot = triggersreboot if process: self.proc = str(process) else: self.proc = str(name) self.KillCmd = "killall -9 " + self.proc def kill(self, node): if self.CM.rsh(node, self.KillCmd) != 0: self.CM.log ("ERROR: Kill %s failed on node %s" % (self.name,node)) return None return 1 diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py index cc82171081..9a8d64cfe9 100755 --- a/cts/CTSaudits.py +++ b/cts/CTSaudits.py @@ -1,865 +1,862 @@ """ Auditing classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2000-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2000-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import time, re, uuid from cts.watcher import LogWatcher from cts.remote import input_wrapper from cts.CTSvars import * class ClusterAudit(object): def __init__(self, cm): self.CM = cm def __call__(self): raise ValueError("Abstract Class member (__call__)") def is_applicable(self): '''Return TRUE if we are applicable in the current test configuration''' raise ValueError("Abstract Class member (is_applicable)") return 1 def log(self, args): self.CM.log("audit: %s" % args) def debug(self, args): self.CM.debug("audit: %s" % args) def name(self): raise ValueError("Abstract Class member (name)") AllAuditClasses = [ ] class LogAudit(ClusterAudit): def name(self): return "LogAudit" def __init__(self, cm): self.CM = cm self.kinds = [ "combined syslog", "journal", "remote" ] def RestartClusterLogging(self, nodes=None): if not nodes: nodes = self.CM.Env["nodes"] self.CM.debug("Restarting logging on: %s" % repr(nodes)) for node in nodes: if self.CM.Env["have_systemd"]: if self.CM.rsh(node, "systemctl stop systemd-journald.socket") != 0: self.CM.log ("ERROR: Cannot stop 'systemd-journald' on %s" % node) if self.CM.rsh(node, "systemctl start systemd-journald.service") != 0: self.CM.log ("ERROR: Cannot start 'systemd-journald' on %s" % node) if self.CM.rsh(node, "service %s restart" % self.CM.Env["syslogd"]) != 0: self.CM.log ("ERROR: Cannot restart '%s' on %s" % (self.CM.Env["syslogd"], node)) def TestLogging(self): patterns = [] prefix = "Test message from" suffix = str(uuid.uuid4()) watch = {} for node in self.CM.Env["nodes"]: # Look for the node name in two places to make sure # that syslog is 
logging with the correct hostname m = re.search("^([^.]+).*", node) if m: simple = m.group(1) else: simple = node patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix)) watch_pref = self.CM.Env["LogWatcher"] if watch_pref == "any": for k in self.kinds: watch[k] = LogWatcher(self.CM.Env["LogFileName"], patterns, "LogAudit", 5, silent=True, hosts=self.CM.Env["nodes"], kind=k) watch[k].setwatch() else: k = watch_pref watch[k] = LogWatcher(self.CM.Env["LogFileName"], patterns, "LogAudit", 5, silent=True, hosts=self.CM.Env["nodes"], kind=k) watch[k].setwatch() if watch_pref == "any": self.CM.log("Writing log with key: %s" % (suffix)) for node in self.CM.Env["nodes"]: cmd = "logger -p %s.info %s %s %s" % (self.CM.Env["SyslogFacility"], prefix, node, suffix) if self.CM.rsh(node, cmd, synchronous=0, silent=True) != 0: self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node)) for k in self.kinds: if k in watch: w = watch[k] if watch_pref == "any": self.CM.log("Testing for %s logs" % (k)) w.lookforall(silent=True) if not w.unmatched: if watch_pref == "any": self.CM.log ("Continuing with %s-based log reader" % (w.kind)) self.CM.Env["LogWatcher"] = w.kind return 1 for k in list(watch.keys()): w = watch[k] if w.unmatched: for regex in w.unmatched: self.CM.log ("Test message [%s] not found in %s logs." % (regex, w.kind)) return 0 def __call__(self): max = 3 attempt = 0 self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) while attempt <= max and self.TestLogging() == 0: attempt = attempt + 1 self.RestartClusterLogging() time.sleep(60*attempt) if attempt > max: self.CM.log("ERROR: Cluster logging unrecoverable.") return 0 return 1 def is_applicable(self): if self.CM.Env["DoBSC"]: return 0 if self.CM.Env["LogAuditDisabled"]: return 0 return 1 class DiskAudit(ClusterAudit): def name(self): return "DiskspaceAudit" def __init__(self, cm): self.CM = cm def __call__(self): result = 1 # @TODO Use directory of PCMK_logfile if set on host dfcmd = "df -BM " + CTSvars.CRM_LOG_DIR + " | tail -1 | awk '{print $(NF-1)\" \"$(NF-2)}' | tr -d 'M%'" self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) for node in self.CM.Env["nodes"]: dfout = self.CM.rsh(node, dfcmd, 1) if not dfout: self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node)) else: try: (used, remain) = dfout.split() used_percent = int(used) remaining_mb = int(remain) except (ValueError, TypeError): self.CM.log("Warning: df output '%s' from %s was invalid [%s, %s]" % (dfout, node, used, remain)) else: if remaining_mb < 10 or used_percent > 95: self.CM.log("CRIT: Out of log disk space on %s (%d%% / %dMB)" % (node, used_percent, remaining_mb)) result = None if self.CM.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Continue? 
[nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Disk full on %s" % (node)) elif remaining_mb < 100 or used_percent > 90: self.CM.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node)) return result def is_applicable(self): if self.CM.Env["DoBSC"]: return 0 return 1 class FileAudit(ClusterAudit): def name(self): return "FileAudit" def __init__(self, cm): self.CM = cm self.known = [] def __call__(self): result = 1 self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) for node in self.CM.Env["nodes"]: (rc, lsout) = self.CM.rsh(node, "ls -al /var/lib/pacemaker/cores/* | grep core.[0-9]", None) for line in lsout: line = line.strip() if line not in self.known: result = 0 self.known.append(line) self.CM.log("Warning: Pacemaker core file on %s: %s" % (node, line)) (rc, lsout) = self.CM.rsh(node, "ls -al /var/lib/corosync | grep core.[0-9]", None) for line in lsout: line = line.strip() if line not in self.known: result = 0 self.known.append(line) self.CM.log("Warning: Corosync core file on %s: %s" % (node, line)) if node in self.CM.ShouldBeStatus and self.CM.ShouldBeStatus[node] == "down": clean = 0 (rc, lsout) = self.CM.rsh(node, "ls -al /dev/shm | grep qb-", None) for line in lsout: result = 0 clean = 1 self.CM.log("Warning: Stale IPC file on %s: %s" % (node, line)) if clean: (rc, lsout) = self.CM.rsh(node, "ps axf | grep -e pacemaker -e corosync", None) for line in lsout: self.CM.debug("ps[%s]: %s" % (node, line)) self.CM.rsh(node, "rm -rf /dev/shm/qb-*") else: self.CM.debug("Skipping %s" % node) return result def is_applicable(self): return 1 class AuditResource(object): def __init__(self, cm, line): fields = line.split() self.CM = cm self.line = line self.type = fields[1] self.id = fields[2] self.clone_id = fields[3] self.parent = fields[4] self.rprovider = fields[5] self.rclass = fields[6] self.rtype = fields[7] self.host = fields[8] self.needs_quorum = fields[9] self.flags = int(fields[10]) self.flags_s = fields[11] if self.parent == "NA": self.parent = None def unique(self): if self.flags & int("0x00000020", 16): return 1 return 0 def orphan(self): if self.flags & int("0x00000001", 16): return 1 return 0 def managed(self): if self.flags & int("0x00000002", 16): return 1 return 0 class AuditConstraint(object): def __init__(self, cm, line): fields = line.split() self.CM = cm self.line = line self.type = fields[1] self.id = fields[2] self.rsc = fields[3] self.target = fields[4] self.score = fields[5] self.rsc_role = fields[6] self.target_role = fields[7] if self.rsc_role == "NA": self.rsc_role = None if self.target_role == "NA": self.target_role = None class PrimitiveAudit(ClusterAudit): def name(self): return "PrimitiveAudit" def __init__(self, cm): self.CM = cm def doResourceAudit(self, resource, quorum): rc = 1 active = self.CM.ResourceLocation(resource.id) if len(active) == 1: if quorum: self.debug("Resource %s active on %s" % (resource.id, repr(active))) elif resource.needs_quorum == 1: self.CM.log("Resource %s active without quorum: %s" % (resource.id, repr(active))) rc = 0 elif not resource.managed(): self.CM.log("Resource %s not managed. 
Active on %s" % (resource.id, repr(active))) elif not resource.unique(): # TODO: Figure out a clever way to actually audit these resource types if len(active) > 1: self.debug("Non-unique resource %s is active on: %s" % (resource.id, repr(active))) else: self.debug("Non-unique resource %s is not active" % resource.id) elif len(active) > 1: self.CM.log("Resource %s is active multiple times: %s" % (resource.id, repr(active))) rc = 0 elif resource.orphan(): self.debug("Resource %s is an inactive orphan" % resource.id) elif len(self.inactive_nodes) == 0: self.CM.log("WARN: Resource %s not served anywhere" % resource.id) rc = 0 elif self.CM.Env["warn-inactive"] == 1: if quorum or not resource.needs_quorum: self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) else: self.debug("Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) elif quorum or not resource.needs_quorum: self.debug("Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) return rc def setup(self): self.target = None self.resources = [] self.constraints = [] self.active_nodes = [] self.inactive_nodes = [] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.active_nodes.append(node) else: self.inactive_nodes.append(node) for node in self.CM.Env["nodes"]: if self.target == None and self.CM.ShouldBeStatus[node] == "up": self.target = node if not self.target: # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource # with CIB_file=/path/to/cib.xml even when the cluster isn't running self.debug("No nodes active - skipping %s" % self.name()) return 0 (rc, lines) = self.CM.rsh(self.target, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): self.resources.append(AuditResource(self.CM, line)) elif re.search("^Constraint", line): self.constraints.append(AuditConstraint(self.CM, line)) else: self.CM.log("Unknown entry: %s" % line); return 1 def __call__(self): rc = 1 if not self.setup(): return 1 quorum = self.CM.HasQuorum(None) for resource in self.resources: if resource.type == "primitive": if self.doResourceAudit(resource, quorum) == 0: rc = 0 return rc def is_applicable(self): # @TODO Due to long-ago refactoring, this name test would never match, # so this audit (and those derived from it) would never run. # Uncommenting the next lines fixes the name test, but that then # exposes pre-existing bugs that need to be fixed. 
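PrimitiveAudit.setup() splits each "Resource" line of crm_resource -c output into positional fields, and AuditResource then tests individual bits of the flags field (0x01 orphan, 0x02 managed, 0x20 unique). A self-contained sketch of that parsing and bit-testing; the sample line is made up but laid out the way the class indexes its fields, so it is not real crm_resource output:

    # Field 1 = type, field 2 = id, field 10 = flags, as AuditResource expects.
    sample = "Resource: primitive dummy1 NA NA pacemaker ocf Dummy node1 1 0x00000022 unique+managed"

    fields = sample.split()
    rsc_type, rsc_id = fields[1], fields[2]
    flags = int(fields[10], 16)

    is_orphan  = bool(flags & 0x01)
    is_managed = bool(flags & 0x02)
    is_unique  = bool(flags & 0x20)

    print(rsc_id, rsc_type, is_orphan, is_managed, is_unique)
    # dummy1 primitive False True True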
#if self.CM["Name"] == "crm-corosync": # return 1 return 0 class GroupAudit(PrimitiveAudit): def name(self): return "GroupAudit" def __call__(self): rc = 1 if not self.setup(): return 1 for group in self.resources: if group.type == "group": first_match = 1 group_location = None for child in self.resources: if child.parent == group.id: nodes = self.CM.ResourceLocation(child.id) if first_match and len(nodes) > 0: group_location = nodes[0] first_match = 0 if len(nodes) > 1: rc = 0 self.CM.log("Child %s of %s is active more than once: %s" % (child.id, group.id, repr(nodes))) elif len(nodes) == 0: # Groups are allowed to be partially active # However we do need to make sure later children aren't running group_location = None self.debug("Child %s of %s is stopped" % (child.id, group.id)) elif nodes[0] != group_location: rc = 0 self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s" % (child.id, group.id, nodes[0], group_location)) else: self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0])) return rc class CloneAudit(PrimitiveAudit): def name(self): return "CloneAudit" def __call__(self): rc = 1 if not self.setup(): return 1 for clone in self.resources: if clone.type == "clone": for child in self.resources: if child.parent == clone.id and child.type == "primitive": self.debug("Checking child %s of %s..." % (child.id, clone.id)) # Check max and node_max # Obtain with: # crm_resource -g clone_max --meta -r child.id # crm_resource -g clone_node_max --meta -r child.id return rc class ColocationAudit(PrimitiveAudit): def name(self): return "ColocationAudit" def crm_location(self, resource): (rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, None) hosts = [] if rc == 0: for line in lines: fields = line.split() hosts.append(fields[0]) return hosts def __call__(self): rc = 1 if not self.setup(): return 1 for coloc in self.constraints: if coloc.type == "rsc_colocation": source = self.crm_location(coloc.rsc) target = self.crm_location(coloc.target) if len(source) == 0: self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc)) else: for node in source: if not node in target: rc = 0 self.CM.log("Colocation audit (%s): %s running on %s (not in %s)" % (coloc.id, coloc.rsc, node, repr(target))) else: self.debug("Colocation audit (%s): %s running on %s (in %s)" % (coloc.id, coloc.rsc, node, repr(target))) return rc class ControllerStateAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 up_are_down = 0 down_are_up = 0 unstable_list = [] for node in self.CM.Env["nodes"]: should_be = self.CM.ShouldBeStatus[node] rc = self.CM.test_node_CM(node) if rc > 0: if should_be == "down": down_are_up = down_are_up + 1 if rc == 1: unstable_list.append(node) elif should_be == "up": up_are_down = up_are_down + 1 if len(unstable_list) > 0: passed = 0 self.CM.log("Cluster is not stable: %d (of %d): %s" % (len(unstable_list), self.CM.upcount(), repr(unstable_list))) if up_are_down > 0: passed = 0 self.CM.log("%d (of %d) nodes expected to be up were down." 
% (up_are_down, len(self.CM.Env["nodes"]))) if down_are_up > 0: passed = 0 self.CM.log("%d (of %d) nodes expected to be down were up." % (down_are_up, len(self.CM.Env["nodes"]))) return passed def name(self): return "ControllerStateAudit" def is_applicable(self): # @TODO Due to long-ago refactoring, this name test would never match, # so this audit (and those derived from it) would never run. # Uncommenting the next lines fixes the name test, but that then # exposes pre-existing bugs that need to be fixed. #if self.CM["Name"] == "crm-corosync": # return 1 return 0 class CIBAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 ccm_partitions = self.CM.find_partitions() if len(ccm_partitions) == 0: self.debug("\tNo partitions to audit") return 1 for partition in ccm_partitions: self.debug("\tAuditing CIB consistency for: %s" % partition) partition_passed = 0 if self.audit_cib_contents(partition) == 0: passed = 0 return passed def audit_cib_contents(self, hostlist): passed = 1 node0 = None node0_xml = None partition_hosts = hostlist.split() for node in partition_hosts: node_xml = self.store_remote_cib(node, node0) if node_xml == None: self.CM.log("Could not perform audit: No configuration from %s" % node) passed = 0 elif node0 == None: node0 = node node0_xml = node_xml elif node0_xml == None: self.CM.log("Could not perform audit: No configuration from %s" % node0) passed = 0 else: (rc, result) = self.CM.rsh( node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), None) if rc != 0: self.CM.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc)) passed = 0 for line in result: if not re.search("", line): passed = 0 self.debug("CibDiff[%s-%s]: %s" % (node0, node, line)) else: self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line)) # self.CM.rsh(node0, "rm -f %s" % node_xml) # self.CM.rsh(node0, "rm -f %s" % node0_xml) return passed def store_remote_cib(self, node, target): combined = "" filename = "/tmp/ctsaudit.%s.xml" % node if not target: target = node (rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], None) if rc != 0: self.CM.log("Could not retrieve configuration") return None self.CM.rsh("localhost", "rm -f %s" % filename) for line in lines: self.CM.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), silent=True) if self.CM.rsh.cp(filename, "root@%s:%s" % (target, filename), silent=True) != 0: self.CM.log("Could not store configuration") return None return filename def name(self): return "CibAudit" def is_applicable(self): # @TODO Due to long-ago refactoring, this name test would never match, # so this audit (and those derived from it) would never run. # Uncommenting the next lines fixes the name test, but that then # exposes pre-existing bugs that need to be fixed. 
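CIBAudit compares each node's stored CIB against the first node's copy with crm_diff and treats a non-zero exit code or unexpected diff output as an audit failure. A hedged standalone sketch of that comparison step, assuming crm_diff is installed and the two XML dumps already exist as local files:

    import subprocess

    def cibs_match(original_xml, new_xml):
        # Same invocation style as the audit above; rc == 0 is read as "no difference".
        proc = subprocess.run(
            ["crm_diff", "-VV", "-cf", "--new", new_xml, "--original", original_xml],
            capture_output=True, text=True)
        if proc.returncode != 0:
            for line in proc.stdout.splitlines():
                print("CibDiff:", line)
        return proc.returncode == 0

    # cibs_match("/tmp/ctsaudit.node1.xml", "/tmp/ctsaudit.node2.xml")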
#if self.CM["Name"] == "crm-corosync": # return 1 return 0 class PartitionAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} self.NodeEpoch = {} self.NodeState = {} self.NodeQuorum = {} def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 ccm_partitions = self.CM.find_partitions() if ccm_partitions == None or len(ccm_partitions) == 0: return 1 self.CM.cluster_stable(double_check=True) if len(ccm_partitions) != self.CM.partitions_expected: self.CM.log("ERROR: %d cluster partitions detected:" % len(ccm_partitions)) passed = 0 for partition in ccm_partitions: self.CM.log("\t %s" % partition) for partition in ccm_partitions: partition_passed = 0 if self.audit_partition(partition) == 0: passed = 0 return passed def trim_string(self, avalue): if not avalue: return None if len(avalue) > 1: return avalue[:-1] def trim2int(self, avalue): if not avalue: return None if len(avalue) > 1: return int(avalue[:-1]) def audit_partition(self, partition): passed = 1 dc_found = [] dc_allowed_list = [] lowest_epoch = None node_list = partition.split() self.debug("Auditing partition: %s" % (partition)) for node in node_list: if self.CM.ShouldBeStatus[node] != "up": self.CM.log("Warn: Node %s appeared out of nowhere" % (node)) self.CM.ShouldBeStatus[node] = "up" # not in itself a reason to fail the audit (not what we're # checking for in this audit) self.NodeState[node] = self.CM.rsh(node, self.CM["StatusCmd"] % node, 1) self.NodeEpoch[node] = self.CM.rsh(node, self.CM["EpochCmd"], 1) self.NodeQuorum[node] = self.CM.rsh(node, self.CM["QuorumCmd"], 1) self.debug("Node %s: %s - %s - %s." % (node, self.NodeState[node], self.NodeEpoch[node], self.NodeQuorum[node])) self.NodeState[node] = self.trim_string(self.NodeState[node]) self.NodeEpoch[node] = self.trim2int(self.NodeEpoch[node]) self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node]) if not self.NodeEpoch[node]: self.CM.log("Warn: Node %s dissappeared: cant determin epoch" % (node)) self.CM.ShouldBeStatus[node] = "down" # not in itself a reason to fail the audit (not what we're # checking for in this audit) elif lowest_epoch == None or self.NodeEpoch[node] < lowest_epoch: lowest_epoch = self.NodeEpoch[node] if not lowest_epoch: self.CM.log("Lowest epoch not determined in %s" % (partition)) passed = 0 for node in node_list: if self.CM.ShouldBeStatus[node] == "up": if self.CM.is_node_dc(node, self.NodeState[node]): dc_found.append(node) if self.NodeEpoch[node] == lowest_epoch: self.debug("%s: OK" % node) elif not self.NodeEpoch[node]: self.debug("Check on %s ignored: no node epoch" % node) elif not lowest_epoch: self.debug("Check on %s ignored: no lowest epoch" % node) else: self.CM.log("DC %s is not the oldest node (%d vs. 
%d)" % (node, self.NodeEpoch[node], lowest_epoch)) passed = 0 if len(dc_found) == 0: self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)" % (len(dc_allowed_list), str(dc_allowed_list), str(node_list))) elif len(dc_found) > 1: self.CM.log("%d DCs (%s) found in cluster partition: %s" % (len(dc_found), str(dc_found), str(node_list))) passed = 0 if passed == 0: for node in node_list: if self.CM.ShouldBeStatus[node] == "up": self.CM.log("epoch %s : %s" % (self.NodeEpoch[node], self.NodeState[node])) return passed def name(self): return "PartitionAudit" def is_applicable(self): # @TODO Due to long-ago refactoring, this name test would never match, # so this audit (and those derived from it) would never run. # Uncommenting the next lines fixes the name test, but that then # exposes pre-existing bugs that need to be fixed. #if self.CM["Name"] == "crm-corosync": # return 1 return 0 AllAuditClasses.append(DiskAudit) AllAuditClasses.append(FileAudit) AllAuditClasses.append(LogAudit) AllAuditClasses.append(ControllerStateAudit) AllAuditClasses.append(PartitionAudit) AllAuditClasses.append(PrimitiveAudit) AllAuditClasses.append(GroupAudit) AllAuditClasses.append(CloneAudit) AllAuditClasses.append(ColocationAudit) AllAuditClasses.append(CIBAudit) def AuditList(cm): result = [] for auditclass in AllAuditClasses: a = auditclass(cm) if a.is_applicable(): result.append(a) return result diff --git a/cts/CTSlab.py.in b/cts/CTSlab.py.in index 4bae935151..c81903050d 100644 --- a/cts/CTSlab.py.in +++ b/cts/CTSlab.py.in @@ -1,130 +1,127 @@ #!@PYTHON@ """ Command-line interface to Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - __copyright__ = "Copyright 2001-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys, signal, os pdir = os.path.dirname(sys.path[0]) sys.path.insert(0, pdir) # So that things work from the source directory try: from cts.CTSvars import * from cts.CM_corosync import * from cts.CTSaudits import AuditList from cts.CTStests import TestList from cts.CTSscenarios import * from cts.logging import LogFactory except ImportError as e: sys.stderr.write("abort: %s\n" % e) sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" % ' '.join(sys.path)) sys.exit(1) # These are globals so they can be used by the signal handler. 
scenario = None LogFactory().add_stderr() def sig_handler(signum, frame) : LogFactory().log("Interrupted by signal %d"%signum) if scenario: scenario.summarize() if signum == 15 : if scenario: scenario.TearDown() sys.exit(1) if __name__ == '__main__': Environment = CtsLab(sys.argv[1:]) NumIter = Environment["iterations"] if NumIter is None: NumIter = 1 Tests = [] # Set the signal handler signal.signal(15, sig_handler) signal.signal(10, sig_handler) # Create the Cluster Manager object cm = None if Environment["Stack"] == "corosync 2+": cm = crm_corosync(Environment) else: LogFactory().log("Unknown stack: "+Environment["stack"]) sys.exit(1) if Environment["TruncateLog"] == 1: if Environment["OutputFile"] is None: LogFactory().log("Ignoring truncate request because no output file specified") else: LogFactory().log("Truncating %s" % Environment["OutputFile"]) with open(Environment["OutputFile"], "w") as outputfile: outputfile.truncate(0) Audits = AuditList(cm) if Environment["ListTests"] == 1: Tests = TestList(cm, Audits) LogFactory().log("Total %d tests"%len(Tests)) for test in Tests : LogFactory().log(str(test.name)); sys.exit(0) elif len(Environment["tests"]) == 0: Tests = TestList(cm, Audits) else: Chosen = Environment["tests"] for TestCase in Chosen: match = None for test in TestList(cm, Audits): if test.name == TestCase: match = test if not match: LogFactory().log("--choose: No applicable/valid tests chosen") sys.exit(1) else: Tests.append(match) # Scenario selection if Environment["scenario"] == "basic-sanity": scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests) elif Environment["scenario"] == "all-once": NumIter = len(Tests) scenario = AllOnce( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) elif Environment["scenario"] == "sequence": scenario = Sequence( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) elif Environment["scenario"] == "boot": scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, []) else: scenario = RandomTests( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ") LogFactory().log("Stack: %s (%s)" % (Environment["Stack"], Environment["Name"])) LogFactory().log("Schema: %s" % Environment["Schema"]) LogFactory().log("Scenario: %s" % scenario.__doc__) LogFactory().log("CTS Exerciser: %s" % Environment["cts-exerciser"]) LogFactory().log("CTS Logfile: %s" % Environment["OutputFile"]) LogFactory().log("Random Seed: %s" % Environment["RandSeed"]) LogFactory().log("Syslog variant: %s" % Environment["syslogd"].strip()) LogFactory().log("System log files: %s" % Environment["LogFileName"]) if Environment.has_key("IPBase"): LogFactory().log("Base IP for resources: %s" % Environment["IPBase"]) LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"]) Environment.dump() rc = Environment.run(scenario, NumIter) sys.exit(rc) diff --git a/cts/CTSscenarios.py b/cts/CTSscenarios.py index b0ebb081bc..b411a6a406 100644 --- a/cts/CTSscenarios.py +++ b/cts/CTSscenarios.py @@ -1,601 +1,598 @@ """ Test scenario classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2000-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2000-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or 
later (GPLv2+) WITHOUT ANY WARRANTY" from cts.CTS import * from cts.CTStests import CTSTest from cts.CTSaudits import ClusterAudit from cts.watcher import LogWatcher from cts.remote import input_wrapper class ScenarioComponent(object): def __init__(self, Env): self.Env = Env def IsApplicable(self): '''Return TRUE if the current ScenarioComponent is applicable in the given LabEnvironment given to the constructor. ''' raise ValueError("Abstract Class member (IsApplicable)") def SetUp(self, CM): '''Set up the given ScenarioComponent''' raise ValueError("Abstract Class member (Setup)") def TearDown(self, CM): '''Tear down (undo) the given ScenarioComponent''' raise ValueError("Abstract Class member (Setup)") class Scenario(object): ( '''The basic idea of a scenario is that of an ordered list of ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn, and then after the tests have been run, they are torn down using TearDown() (in reverse order). A Scenario is applicable to a particular cluster manager iff each ScenarioComponent is applicable. A partially set up scenario is torn down if it fails during setup. ''') def __init__(self, ClusterManager, Components, Audits, Tests): "Initialize the Scenario from the list of ScenarioComponents" self.ClusterManager = ClusterManager self.Components = Components self.Audits = Audits self.Tests = Tests self.BadNews = None self.TestSets = [] self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0} self.Sets = [] #self.ns=CTS.NodeStatus(self.Env) for comp in Components: if not issubclass(comp.__class__, ScenarioComponent): raise ValueError("Init value must be subclass of ScenarioComponent") for audit in Audits: if not issubclass(audit.__class__, ClusterAudit): raise ValueError("Init value must be subclass of ClusterAudit") for test in Tests: if not issubclass(test.__class__, CTSTest): raise ValueError("Init value must be a subclass of CTSTest") def IsApplicable(self): ( '''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable() ''' ) for comp in self.Components: if not comp.IsApplicable(): return None return 1 def SetUp(self): '''Set up the Scenario. Return TRUE on success.''' self.ClusterManager.prepare() self.audit() # Also detects remote/local log config self.ClusterManager.StatsMark(0) self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"]) self.audit() self.ClusterManager.install_support() self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"], self.ClusterManager.templates.get_patterns( self.ClusterManager.Env["Name"], "BadNews"), "BadNews", 0, kind=self.ClusterManager.Env["LogWatcher"], hosts=self.ClusterManager.Env["nodes"]) self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit j = 0 while j < len(self.Components): if not self.Components[j].SetUp(self.ClusterManager): # OOPS! We failed. Tear partial setups down. 
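Scenario.SetUp() walks its components in order and, on the first failure, calls TearDown(j) so that everything set up so far, including the component that just failed, is torn down in reverse order. A minimal sketch of that ordering contract with stub components (component names are illustrative):

    class StubComponent:
        def __init__(self, name, ok=True):
            self.name, self.ok = name, ok
        def SetUp(self):
            print("setup", self.name)
            return self.ok
        def TearDown(self):
            print("teardown", self.name)

    def run_scenario(components):
        for j, comp in enumerate(components):
            if not comp.SetUp():
                # Undo the partial setup in reverse order, including the failed
                # component, mirroring TearDown(j) above.
                for done in reversed(components[:j + 1]):
                    done.TearDown()
                return False
        return True

    run_scenario([StubComponent("boot"), StubComponent("packet-loss", ok=False)])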
self.audit() self.ClusterManager.log("Tearing down partial setup") self.TearDown(j) return None j = j + 1 self.audit() return 1 def TearDown(self, max=None): '''Tear Down the Scenario - in reverse order.''' if max == None: max = len(self.Components)-1 j = max while j >= 0: self.Components[j].TearDown(self.ClusterManager) j = j - 1 self.audit() self.ClusterManager.StatsExtract() self.ClusterManager.install_support("uninstall") def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 def run(self, Iterations): self.ClusterManager.oprofileStart() try: self.run_loop(Iterations) self.ClusterManager.oprofileStop() except: self.ClusterManager.oprofileStop() raise def run_loop(self, Iterations): raise ValueError("Abstract Class member (run_loop)") def run_test(self, test, testcount): nodechoice = self.ClusterManager.Env.RandomNode() ret = 1 where = "" did_run = 0 self.ClusterManager.StatsMark(testcount) self.ClusterManager.instance_errorstoignore_clear() self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) + "[" + ("%d" % testcount).rjust(3) + "]") starttime = test.set_timer() if not test.setup(nodechoice): self.ClusterManager.log("Setup failed") ret = 0 elif not test.canrunnow(nodechoice): self.ClusterManager.log("Skipped") test.skipped() else: did_run = 1 ret = test(nodechoice) if not test.teardown(nodechoice): self.ClusterManager.log("Teardown failed") if self.ClusterManager.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Continue? [nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice)) ret = 0 stoptime = time.time() self.ClusterManager.oprofileSave(testcount) elapsed_time = stoptime - starttime test_time = stoptime - test.get_timer() if not test["min_time"]: test["elapsed_time"] = elapsed_time test["min_time"] = test_time test["max_time"] = test_time else: test["elapsed_time"] = test["elapsed_time"] + elapsed_time if test_time < test["min_time"]: test["min_time"] = test_time if test_time > test["max_time"]: test["max_time"] = test_time if ret: self.incr("success") test.log_timer() else: self.incr("failure") self.ClusterManager.statall() did_run = 1 # Force the test count to be incremented anyway so test extraction works self.audit(test.errorstoignore()) return did_run def summarize(self): self.ClusterManager.log("****************") self.ClusterManager.log("Overall Results:" + repr(self.Stats)) self.ClusterManager.log("****************") stat_filter = { "calls":0, "failure":0, "skipped":0, "auditfail":0, } self.ClusterManager.log("Test Summary") for test in self.Tests: for key in list(stat_filter.keys()): stat_filter[key] = test.Stats[key] self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter)) self.ClusterManager.debug("Detailed Results") for test in self.Tests: self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats)) self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED") def audit(self, LocalIgnore=[]): errcount = 0 ignorelist = [] ignorelist.append("CTS:") ignorelist.extend(LocalIgnore) ignorelist.extend(self.ClusterManager.errorstoignore()) ignorelist.extend(self.ClusterManager.instance_errorstoignore()) # This makes sure everything is stabilized before starting... 
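run_test() keeps three duration counters per test: a cumulative elapsed_time plus a running min_time/max_time for individual runs. A small sketch of that bookkeeping over made-up sample durations (it simplifies by using a single duration where the real code distinguishes wall time from test time):

    stats = {"elapsed_time": 0.0, "min_time": None, "max_time": None}

    def record(test_time, stats=stats):
        # The first run initializes min/max; later runs only widen the range
        if stats["min_time"] is None:
            stats["min_time"] = stats["max_time"] = test_time
        else:
            stats["min_time"] = min(stats["min_time"], test_time)
            stats["max_time"] = max(stats["max_time"], test_time)
        stats["elapsed_time"] += test_time

    for t in (2.5, 1.2, 3.8):
        record(t)
    print(stats)   # {'elapsed_time': 7.5, 'min_time': 1.2, 'max_time': 3.8}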
failed = 0 for audit in self.Audits: if not audit(): self.ClusterManager.log("Audit " + audit.name() + " FAILED.") failed += 1 else: self.ClusterManager.debug("Audit " + audit.name() + " passed.") while errcount < 1000: match = None if self.BadNews: match = self.BadNews.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.ClusterManager.log("BadNews: " + match) self.incr("BadNews") errcount = errcount + 1 else: break else: if self.ClusterManager.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Big problems. Continue? [nY]') except EOFError as e: answer = "n" if answer and answer == "n": self.ClusterManager.log("Shutting down.") self.summarize() self.TearDown() raise ValueError("Looks like we hit a BadNews jackpot!") if self.BadNews: self.BadNews.end() return failed class AllOnce(Scenario): '''Every Test Once''' # Accessable as __doc__ def run_loop(self, Iterations): testcount = 1 for test in self.Tests: self.run_test(test, testcount) testcount += 1 class RandomTests(Scenario): '''Random Test Execution''' def run_loop(self, Iterations): testcount = 1 while testcount <= Iterations: test = self.ClusterManager.Env.RandomGen.choice(self.Tests) self.run_test(test, testcount) testcount += 1 class BasicSanity(Scenario): '''Basic Cluster Sanity''' def run_loop(self, Iterations): testcount = 1 while testcount <= Iterations: test = self.Environment.RandomGen.choice(self.Tests) self.run_test(test, testcount) testcount += 1 class Sequence(Scenario): '''Named Tests in Sequence''' def run_loop(self, Iterations): testcount = 1 while testcount <= Iterations: for test in self.Tests: self.run_test(test, testcount) testcount += 1 class Boot(Scenario): '''Start the Cluster''' def run_loop(self, Iterations): testcount = 0 class BootCluster(ScenarioComponent): ( '''BootCluster is the most basic of ScenarioComponents. This ScenarioComponent simply starts the cluster manager on all the nodes. It is fairly robust as it waits for all nodes to come up before starting as they might have been rebooted or crashed for some reason beforehand. ''') def __init__(self, Env): pass def IsApplicable(self): '''BootCluster is so generic it is always Applicable''' return 1 def SetUp(self, CM): '''Basic Cluster Manager startup. Start everything''' CM.prepare() # Clear out the cobwebs ;-) CM.stopall(verbose=True, force=True) # Now start the Cluster Manager on all the nodes. CM.log("Starting Cluster Manager on all nodes.") return CM.startall(verbose=True, quick=True) def TearDown(self, CM, force=False): '''Set up the given ScenarioComponent''' # Stop the cluster manager everywhere CM.log("Stopping Cluster Manager on all nodes") return CM.stopall(verbose=True, force=force) class LeaveBooted(BootCluster): def TearDown(self, CM): '''Set up the given ScenarioComponent''' # Stop the cluster manager everywhere CM.log("Leaving Cluster running on all nodes") return 1 class PingFest(ScenarioComponent): ( '''PingFest does a flood ping to each node in the cluster from the test machine. If the LabEnvironment Parameter PingSize is set, it will be used as the size of ping packet requested (via the -s option). If it is not set, it defaults to 1024 bytes. According to the manual page for ping: Outputs packets as fast as they come back or one hundred times per second, whichever is more. For every ECHO_REQUEST sent a period ``.'' is printed, while for every ECHO_REPLY received a backspace is printed. 
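The audit() loop above drains BadNews matches and drops any line that matches an entry in the ignore list before logging the rest. The same filter can be sketched with plain re over a few sample lines (both the patterns and the lines are made up for illustration):

    import re

    ignorelist = [r"CTS:", r"expected -exit- code"]   # illustrative patterns
    lines = [
        "CTS: starting test",
        "error: child exited with expected -exit- code",
        "error: pacemaker-fenced crashed",
    ]

    for line in lines:
        if any(re.search(ignore, line) for ignore in ignorelist):
            continue
        print("BadNews:", line)
    # Only the crash line is reported.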
This provides a rapid display of how many packets are being dropped. Only the super-user may use this option. This can be very hard on a net- work and should be used with caution. ''' ) def __init__(self, Env): self.Env = Env def IsApplicable(self): '''PingFests are always applicable ;-) ''' return 1 def SetUp(self, CM): '''Start the PingFest!''' self.PingSize = 1024 if "PingSize" in list(CM.Env.keys()): self.PingSize = CM.Env["PingSize"] CM.log("Starting %d byte flood pings" % self.PingSize) self.PingPids = [] for node in CM.Env["nodes"]: self.PingPids.append(self._pingchild(node)) CM.log("Ping PIDs: " + repr(self.PingPids)) return 1 def TearDown(self, CM): '''Stop it right now! My ears are pinging!!''' for pid in self.PingPids: if pid != None: CM.log("Stopping ping process %d" % pid) os.kill(pid, signal.SIGKILL) def _pingchild(self, node): Args = ["ping", "-qfn", "-s", str(self.PingSize), node] sys.stdin.flush() sys.stdout.flush() sys.stderr.flush() pid = os.fork() if pid < 0: self.Env.log("Cannot fork ping child") return None if pid > 0: return pid # Otherwise, we're the child process. os.execvp("ping", Args) self.Env.log("Cannot execvp ping: " + repr(Args)) sys.exit(1) class PacketLoss(ScenarioComponent): ( ''' It would be useful to do some testing of CTS with a modest amount of packet loss enabled - so we could see that everything runs like it should with a certain amount of packet loss present. ''') def IsApplicable(self): '''always Applicable''' return 1 def SetUp(self, CM): '''Reduce the reliability of communications''' if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 : return 1 for node in CM.Env["nodes"]: CM.reducecomm_node(node) CM.log("Reduce the reliability of communications") return 1 def TearDown(self, CM): '''Fix the reliability of communications''' if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 : return 1 for node in CM.Env["nodes"]: CM.unisolate_node(node) CM.log("Fix the reliability of communications") class BasicSanityCheck(ScenarioComponent): ( ''' ''') def IsApplicable(self): return self.Env["DoBSC"] def SetUp(self, CM): CM.prepare() # Clear out the cobwebs self.TearDown(CM) # Now start the Cluster Manager on all the nodes. CM.log("Starting Cluster Manager on BSC node(s).") return CM.startall() def TearDown(self, CM): CM.log("Stopping Cluster Manager on BSC node(s).") return CM.stopall() class Benchmark(ScenarioComponent): ( ''' ''') def IsApplicable(self): return self.Env["benchmark"] def SetUp(self, CM): CM.prepare() # Clear out the cobwebs self.TearDown(CM, force=True) # Now start the Cluster Manager on all the nodes. 
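PingFest forks one flood-ping child per node and kills them all in TearDown. A hedged sketch of the same spawn-and-kill pattern using subprocess instead of raw fork/execvp; flood ping requires root, so the calls are left commented and the snippet only illustrates the process management:

    import signal
    import subprocess

    def start_pings(nodes, size=1024):
        # One "ping -qfn -s <size> <node>" child per node, as PingFest does
        return [subprocess.Popen(["ping", "-qfn", "-s", str(size), node])
                for node in nodes]

    def stop_pings(children):
        for child in children:
            child.send_signal(signal.SIGKILL)

    # children = start_pings(["node1", "node2"])
    # ... run the tests ...
    # stop_pings(children)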
CM.log("Starting Cluster Manager on all node(s).") return CM.startall() def TearDown(self, CM): CM.log("Stopping Cluster Manager on all node(s).") return CM.stopall() class RollingUpgrade(ScenarioComponent): ( ''' Test a rolling upgrade between two versions of the stack ''') def __init__(self, Env): self.Env = Env def IsApplicable(self): if not self.Env["rpm-dir"]: return None if not self.Env["current-version"]: return None if not self.Env["previous-version"]: return None return 1 def install(self, node, version): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version) rc = self.CM.rsh(node, "mkdir -p %s" % target_dir) rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir)) rc = self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir)) return self.success() def upgrade(self, node): return self.install(node, self.CM.Env["current-version"]) def downgrade(self, node): return self.install(node, self.CM.Env["previous-version"]) def SetUp(self, CM): print(repr(self)+"prepare") CM.prepare() # Clear out the cobwebs CM.stopall(force=True) CM.log("Downgrading all nodes to %s." % self.Env["previous-version"]) for node in self.Env["nodes"]: if not self.downgrade(node): CM.log("Couldn't downgrade %s" % node) return None return 1 def TearDown(self, CM): # Stop everything CM.log("Stopping Cluster Manager on Upgrade nodes.") CM.stopall() CM.log("Upgrading all nodes to %s." % self.Env["current-version"]) for node in self.Env["nodes"]: if not self.upgrade(node): CM.log("Couldn't upgrade %s" % node) return None return 1 diff --git a/cts/CTStests.py b/cts/CTStests.py index 8b92d0429a..3cb5fae310 100644 --- a/cts/CTStests.py +++ b/cts/CTStests.py @@ -1,3127 +1,3124 @@ """ Test-specific classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2000-2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2000-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. # import os import re import time import subprocess import tempfile from stat import * from cts import CTS from cts.CTSaudits import * from cts.CTSvars import * from cts.patterns import PatternSelector from cts.logging import LogFactory from cts.remote import RemoteFactory, input_wrapper from cts.watcher import LogWatcher from cts.environment import EnvFactory AllTestClasses = [ ] class CTSTest(object): ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. 
''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Env = EnvFactory().getInstance() self.rsh = RemoteFactory().getInstance() self.logger = LogFactory() self.templates = PatternSelector(cm["Name"]) self.Audits = [] self.timeout = 120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_docker_unsafe = 0 self.is_experimental = 0 self.is_container = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def log(self, args): self.logger.log(args) def debug(self, args): self.logger.debug(args) def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") if key in self.Stats: return self.Stats[key] return None def log_mark(self, msg): self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" % (self.name,key) self.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.logger.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"]) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match = watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.logger.log(prefix + " " + match) errcount = errcount + 1 else: break else: self.logger.log("Too many errors!") watch.end() return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.Env["experimental-tests"]: return 0 elif self.is_docker_unsafe and self.Env["docker"]: return 0 elif self.is_container and not self.Env["container-tests"]: return 0 elif self.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] class StopTest(CTSTest): '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stop" def __call__(self, node): '''Perform the 'stop' test. 
''' self.incr("calls") if self.CM.ShouldBeStatus[node] != "up": return self.skipped() patterns = [] # Technically we should always be able to notice ourselves stopping patterns.append(self.templates["Pat:We_stopped"] % node) # Any active node needs to notice this one left # (note that this won't work if we have multiple partitions) for other in self.Env["nodes"]: if self.CM.ShouldBeStatus[other] == "up" and other != node: patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) #self.debug("Checking %s will notice %s left"%(other, node)) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() if node == self.CM.OurNode: self.incr("us") else: if self.CM.upcount() <= 1: self.incr("all") else: self.incr("them") self.CM.StopaCM(node) watch_result = watch.lookforall() failreason = None UnmatchedList = "||" if watch.unmatched: (rc, output) = self.rsh(node, "/bin/ps axf", None) for line in output: self.debug(line) (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", None) for line in output: self.debug(line) for regex in watch.unmatched: self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex)) UnmatchedList += regex + "||"; failreason = "Missing shutdown pattern" self.CM.cluster_stable(self.Env["DeadTime"]) if not watch.unmatched or self.CM.upcount() == 0: return self.success() if len(watch.unmatched) >= self.CM.upcount(): return self.failure("no match against (%s)" % UnmatchedList) if failreason == None: return self.success() else: return self.failure(failreason) # # We don't register StopTest because it's better when called by # another test... # class StartTest(CTSTest): '''Start (activate) the cluster manager on a node''' def __init__(self, cm, debug=None): CTSTest.__init__(self,cm) self.name = "start" self.debug = debug def __call__(self, node): '''Perform the 'start' test. ''' self.incr("calls") if self.CM.upcount() == 0: self.incr("us") else: self.incr("them") if self.CM.ShouldBeStatus[node] != "down": return self.skipped() elif self.CM.StartaCM(node): return self.success() else: return self.failure("Startup %s on node %s failed" % (self.Env["Name"], node)) # # We don't register StartTest because it's better when called by # another test... # class FlipTest(CTSTest): '''If it's running, stop it. If it's stopped start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Flip" self.start = StartTest(cm) self.stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") if self.CM.ShouldBeStatus[node] == "up": self.incr("stopped") ret = self.stop(node) type = "up->down" # Give the cluster time to recognize it's gone... time.sleep(self.Env["StableTime"]) elif self.CM.ShouldBeStatus[node] == "down": self.incr("started") ret = self.start(node) type = "down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) class RestartTest(CTSTest): '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Restart" self.start = StartTest(cm) self.stop = StopTest(cm) self.benchmark = 1 def __call__(self, node): '''Perform the 'restart' test. 
''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) class StonithdTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") is_dc = self.CM.is_node_dc(node) watchpats = [] watchpats.append(self.templates["Pat:FenceOpOK"] % node) watchpats.append(self.templates["Pat:NodeFenced"] % node) if self.Env["at-boot"] == 0: self.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node] = "down" else: self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) watchpats.append("%s.* S_STARTING -> S_PENDING" % node) watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() origin = self.Env.RandomGen.choice(self.Env["nodes"]) rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) if rc == 194: # 194 - 256 = -62 = Timer expired # # Look for the patterns, usually this means the required # device was running on the node to be fenced - or that # the required devices were in the process of being loaded # and/or moved # # Effectively the node committed suicide so there will be # no confirmation, but pacemaker should be watching and # fence the node again self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) elif origin != node and rc != 0: self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) elif origin == node and rc != 255: # 255 == broken pipe, ie. 
the node was fenced as expected self.logger.log("Locally originated fencing returned %d" % rc) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ self.templates["Pat:Fencing_start"] % ".*", self.templates["Pat:Fencing_ok"] % ".*", self.templates["Pat:Fencing_active"], r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired", ] def is_applicable(self): if not self.is_applicable_common(): return 0 if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return 1 AllTestClasses.append(StonithdTest) class StartOnebyOne(CTSTest): '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StartOnebyOne" self.stopall = SimulStopLite(cm) self.start = StartTest(cm) self.ns = CTS.NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) class SimulStart(CTSTest): '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStart" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") if not self.startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) class SimulStop(CTSTest): '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStop" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) class StopOnebyOne(CTSTest): '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StopOnebyOne" self.startall = SimulStartLite(cm) self.stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... 
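StonithdTest's interpretation of the stonith_admin exit status above relies on a negative errno coming back as an unsigned byte: 194 is -62 ("Timer expired") once re-signed. A small sketch of that conversion; the errno number shown is the Linux value:

    import errno

    def resign_exit_status(rc):
        # Exit statuses are 8-bit; values above 127 were negative before truncation
        return rc - 256 if rc > 127 else rc

    print(resign_exit_status(194))       # -62
    print(errno.errorcode.get(62))       # 'ETIME' on Linux, i.e. "Timer expired"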
ret = self.startall(None) if not ret: return self.failure("Setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.stop(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to stop: " + repr(failed)) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(StopOnebyOne) class RestartOnebyOne(CTSTest): '''Restart all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RestartOnebyOne" self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'RestartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") did_fail = [] self.set_timer() self.restart = RestartTest(self.CM) for node in self.Env["nodes"]: if not self.restart(node): did_fail.append(node) if did_fail: return self.failure("Could not restart %d nodes: %s" % (len(did_fail), repr(did_fail))) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(RestartOnebyOne) class PartialStart(CTSTest): '''Start a node - but tell it to stop before it finishes starting up''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "PartialStart" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) self.stop = StopTest(cm) #self.is_unsafe = 1 def __call__(self, node): '''Perform the 'PartialStart' test. ''' self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Setup failed") watchpats = [] watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.CM.StartaCMnoBlock(node) ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) return self.failure("Setup of %s failed" % node) ret = self.stop(node) if not ret: return self.failure("%s did not stop in time" % node) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # We might do some fencing in the 2-node case if we make it up far enough return [ r"Executing reboot fencing operation", r"Requesting fencing \([^)]+\) of node ", ] # Register StopOnebyOne as a good test to run AllTestClasses.append(PartialStart) class StandbyTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Standby" self.benchmark = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) # make sure the node is active # set the node to standby mode # check resources, none resource should be running on the node # set the node to active mode # check resouces, resources should have been migrated back (SHOULD THEY?) 
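The comment block above outlines StandbyTest's flow: make sure the node is active, put it in standby, verify nothing is running on it, then bring it back. A stub-based sketch of that sequence; StubCM and its SetStandbyMode/StandbyStatus/active_resources methods are hypothetical stand-ins for the real cluster-manager calls:

    class StubCM:
        def __init__(self):
            self.standby = "off"
        def SetStandbyMode(self, node, mode):
            self.standby = mode
            return True
        def StandbyStatus(self, node):
            return self.standby
        def active_resources(self, node):
            return [] if self.standby == "on" else ["dummy1"]

    def standby_check(cm, node):
        if not cm.SetStandbyMode(node, "on") or cm.StandbyStatus(node) != "on":
            return "can't set %s to standby mode" % node
        leftover = cm.active_resources(node)
        if leftover:
            return "%s set to standby, %s still running on it" % (node, leftover)
        cm.SetStandbyMode(node, "off")
        return "ok"

    print(standby_check(StubCM(), "node1"))   # ok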
def __call__(self, node): self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") self.debug("Make sure node %s is active" % node) if self.CM.StandbyStatus(node) != "off": if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.debug("Getting resources running on node %s" % node) rsc_on_node = self.CM.active_resources(node) watchpats = [] watchpats.append(r"State transition .* -> S_POLICY_ENGINE") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.debug("Setting node %s to standby mode" % node) if not self.CM.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.CM.SetStandbyMode(node, "off") return self.failure("cluster didn't react to standby change on %s" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.debug("Checking resources") bad_run = self.CM.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.debug("Setting node %s to active mode" % node) self.CM.SetStandbyMode(node, "off") return rc self.debug("Setting node %s to active mode" % node) if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) class ValgrindTest(CTSTest): '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind, # and clear any valgrind logs from previous runs. For now, we rely on # the user to do this manually. 
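# --- Illustrative aside, not part of the original test -----------------------
# The @TODO above is the manual step the test currently relies on.  A minimal
# sketch of what automating it per node might look like, assuming the
# PCMK_valgrind_enabled / VALGRIND_OPTS settings usually shipped in
# /etc/sysconfig/pacemaker (names and values may differ between versions) and
# an rsh helper that behaves like the test's self.rsh:
VALGRIND_SETTINGS = [
    'PCMK_valgrind_enabled=yes',
    'VALGRIND_OPTS="--leak-check=full --log-file=/var/lib/pacemaker/valgrind-%p"',
]

def enable_valgrind(rsh, node):
    for setting in VALGRIND_SETTINGS:
        rsh(node, "echo '%s' >> /etc/sysconfig/pacemaker" % setting)
    # clear valgrind logs left over from previous runs
    rsh(node, "rm -f /var/lib/pacemaker/valgrind-*")
# ------------------------------------------------------------------------------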
ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") return self.success() def teardown(self, node): # Return all nodes to normal # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks # (no longer used but kept in case feature is restored) leaked = [] self.stop = StopTest(self.CM) for node in self.Env["nodes"]: rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None) for line in output: self.logger.log(line) (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None) for line in output: self.debug(line) self.rsh(node, "rm -f %s" % self.logger.logPat, None) return leaked def __call__(self, node): #leaked = self.find_leaks() #if len(leaked) > 0: # return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", r"pacemaker-based.*: .* avoid confusing Valgrind", r"HA_VALGRIND_ENABLED", ] class StandbyLoopTest(ValgrindTest): '''Check for memory leaks by putting a node in and out of standby for an hour''' # @TODO This is not a useful test for memory leaks def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name = "StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done = time.time() + self.Env["loop-minutes"] * 60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() #AllTestClasses.append(StandbyLoopTest) class BandwidthTest(CTSTest): # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. '''Test the bandwidth which the cluster uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) (handle, self.tempfile) = tempfile.mkstemp(".cts") os.close(handle) self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount() < 1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. 
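# --- Illustrative aside, not part of the original test -----------------------
# The countbandwidth() helper further down estimates traffic by summing the
# "length" fields of about 100 captured UDP packets and dividing by the time
# span between the first and last packet timestamps.  A compact standalone
# sketch of the same idea, for tcpdump-style lines such as
# "12:34:56.789012 IP a.b.c.d.5405 > e.f.g.h.5405: UDP, length 123":
import re

def estimate_bits_per_sec(lines):
    first = last = None
    total_bytes = 0
    for line in lines:
        m = re.search(r"^(\d+):(\d+):(\d+\.\d+).*(?:udp|UDP,).*length:?\s*(\d+)",
                      line)
        if not m:
            continue
        stamp = int(m.group(1)) * 3600 + int(m.group(2)) * 60 + float(m.group(3))
        total_bytes += int(m.group(4))
        if first is None:
            first = stamp
        last = stamp
    if first is None or last <= first:
        return 0
    return int(total_bytes * 8 / (last - first))
# ------------------------------------------------------------------------------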
fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.logger.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.logger.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" % rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count + 1 linesplit = line.split(" ") for j in range(len(linesplit)-1): if linesplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = T1.split(":") time2split = timesplit[2].split(".") time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = line.split(" ") for j in range(len(linessplit)-1): if linessplit[j] == "udp": break if linessplit[j] == "length:": break try: sum = int(linessplit[j+1]) + sum except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = T2.split(":") time2split = timesplit[2].split(".") time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return int((sum*8)/time) def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class MaintenanceMode(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "MaintenanceMode" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 #self.is_unsafe = 1 self.benchmark = 1 self.action = "asyncmon" self.interval = 0 self.rid = "maintenanceDummy" def toggleMaintenanceMode(self, node, action): pats = [] pats.append(self.templates["Pat:DC_IDLE"]) # fail the resource right after turning Maintenance mode on # verify it is not recovered until maintenance mode is turned off if action == "On": pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid)) else: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.debug("Turning maintenance mode %s" % action) self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) if (action == "On"): self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover%s" % (action)) watch.lookforall() self.log_timer("recover%s" % (action)) if watch.unmatched: 
self.debug("Failed to find patterns when turning maintenance mode %s" % action) return repr(watch.unmatched) return "" def insertMaintenanceDummy(self, node): pats = [] pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid))) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.AddDummyRsc(node, self.rid) self.set_timer("addDummy") watch.lookforall() self.log_timer("addDummy") if watch.unmatched: self.debug("Failed to find patterns when adding maintenance dummy resource") return repr(watch.unmatched) return "" def removeMaintenanceDummy(self, node): pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.RemoveDummyRsc(node, self.rid) self.set_timer("removeDummy") watch.lookforall() self.log_timer("removeDummy") if watch.unmatched: self.debug("Failed to find patterns when removing maintenance dummy resource") return repr(watch.unmatched) return "" def managedRscList(self, node): rscList = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.managed(): rscList.append(tmp.id) return rscList def verifyResources(self, node, rscList, managed): managedList = list(rscList) managed_str = "managed" if not managed: managed_str = "unmanaged" (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if managed and not tmp.managed(): continue elif not managed and tmp.managed(): continue elif managedList.count(tmp.id): managedList.remove(tmp.id) if len(managedList) == 0: self.debug("Found all %s resources on %s" % (managed_str, node)) return True self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) return False def __call__(self, node): '''Perform the 'MaintenanceMode' test. ''' self.incr("calls") verify_managed = False verify_unmanaged = False failPat = "" ret = self.startall(None) if not ret: return self.failure("Setup failed") # get a list of all the managed resources. We use this list # after enabling maintenance mode to verify all managed resources # become un-managed. After maintenance mode is turned off, we use # this list to verify all the resources become managed again. managedResources = self.managedRscList(node) if len(managedResources) == 0: self.logger.log("No managed resources on %s" % node) return self.skipped() # insert a fake resource we can fail during maintenance mode # so we can verify recovery does not take place until after maintenance # mode is disabled. failPat = failPat + self.insertMaintenanceDummy(node) # toggle maintenance mode ON, then fail dummy resource. failPat = failPat + self.toggleMaintenanceMode(node, "On") # verify all the resources are now unmanaged if self.verifyResources(node, managedResources, False): verify_unmanaged = True # Toggle maintenance mode OFF, verify dummy is recovered. failPat = failPat + self.toggleMaintenanceMode(node, "Off") # verify all the resources are now managed again if self.verifyResources(node, managedResources, True): verify_managed = True # Remove our maintenance dummy resource. 
failPat = failPat + self.removeMaintenanceDummy(node) self.CM.cluster_stable() if failPat != "": return self.failure("Unmatched patterns: %s" % (failPat)) elif verify_unmanaged is False: return self.failure("Failed to verify resources became unmanaged during maintenance mode") elif verify_managed is False: return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"schedulerd.*: Recover %s\s*\(.*\)" % self.rid, r"Unknown operation: fail", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(MaintenanceMode) class ResourceRecover(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 self.rid = None self.rid_alt = None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") resourcelist = self.CM.active_resources(node) # if there are no resourcelist, return directly if len(resourcelist) == 0: self.logger.log("No active resources on %s" % node) return self.skipped() self.rid = self.Env.RandomGen.choice(resourcelist) self.rid_alt = self.rid rsc = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.id == self.rid: rsc = tmp # Handle anonymous clones that get renamed self.rid = rsc.clone_id break if not rsc: return self.failure("Could not find %s in the resource list" % self.rid) self.debug("Shooting %s aka. 
%s" % (rsc.clone_id, rsc.id)) pats = [] pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id)) if rsc.managed(): pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) if rsc.unique(): pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) else: # Anonymous clones may get restarted with a different clone number pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) watch = self.create_watch(pats, 60) watch.setwatch() self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover") watch.lookforall() self.log_timer("recover") self.CM.cluster_stable() recovered = self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.debug("%s is running on: %s" % (self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"schedulerd.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt), r"Unknown operation: fail", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) class ComponentFail(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ComponentFail" # TODO make this work correctly in docker. self.is_docker_unsafe = 1 self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. 
''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.Env["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.Env.RandomGen.choice(self.complist) self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "corosync": self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # @TODO this should be a flag in the Component if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]: # Ignore actions for fence devices if fencer will respawn # (their registration will be lost, and probes will fail) self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id) # supply a copy so self.patterns doesn't end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append(self.templates["Pat:Fencing_ok"] % node) stonith = self.create_watch(stonithPats, 0) stonith.setwatch() # set the watch for stable watch = self.create_watch( tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for any fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") self.CM.cluster_stable(self.Env["StartTime"]) self.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.debug("Found: " + repr(shot)) self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) if self.Env["at-boot"] == 0: self.CM.ShouldBeStatus[node] = "down" # If fencing occurred, chances are many (if not all) the expected logs # will not be sent - or will be lost when the node reboots return self.success() # check for logs indicating a graceful recovery matched = watch.lookforall(allow_multiple_matches=1) if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected %s patterns" % chosen.name) elif not is_stable: return self.failure("Cluster did not become stable after killing %s" % chosen.name) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... 
self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) class SplitBrainTest(CTSTest): '''It is used to test split-brain. when the path between the two nodes break check the two nodes both take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.is_experimental = 1 def isolate_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.debug("Creating partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: if not self.CM.isolate_node(node, other_nodes): self.logger.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"])) if len(other_nodes) == 0: return 1 self.debug("Healing partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: self.CM.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") self.passed = 1 partitions = {} ret = self.startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we get multiple partitions partitions = {} p_max = len(self.Env["nodes"]) for node in self.Env["nodes"]: p = self.Env.RandomGen.randint(1, p_max) if not p in partitions: partitions[p] = [] partitions[p].append(node) p_max = len(list(partitions.keys())) if p_max > 1: break # else, try again self.debug("Created %d partitions" % p_max) for key in list(partitions.keys()): self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") for key in list(partitions.keys()): self.isolate_partition(partitions[key]) count = 30 while count > 0: if len(self.CM.find_partitions()) != p_max: time.sleep(10) else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability if not self.CM.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state self.CM.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") self.CM.partitions_expected = 1 # And heal them again for key in list(partitions.keys()): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: if len(self.CM.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] partitions = self.CM.find_partitions() if len(partitions) > 0: members = partitions[0].split() if len(members) != len(self.Env["nodes"]): time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is more preferable than # trying to continue with in a messed up state if not self.CM.cluster_stable(1200): self.failure("Reformed cluster not stable") if self.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Continue? 
[nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.Env["DoFencing"]: self.rsh(node, "crm_attribute -V -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ r"Another DC detected:", r"(ERROR|error).*: .*Application of an update diff failed", r"pacemaker-controld.*:.*not in our membership list", r"CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) class Reattach(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def _is_managed(self, node): is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1) is_managed = is_managed[:-1] # Strip off the newline return is_managed == "true" def _set_unmanaged(self, node): self.debug("Disable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") def _set_managed(self, node): self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") def setup(self, node): attempt = 0 if not self.startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started while not self.CM.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.debug("Not stable yet, re-testing") else: self.logger.log("Cluster is not stable") return None return 1 def teardown(self, node): # Make sure 'node' is up start = StartTest(self.CM) start(node) if not self._is_managed(node): self.logger.log("Attempting to re-enable resource management on %s" % node) self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): self.logger.log("Could not re-enable resource management") return 0 return 1 def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] # Conveniently, the scheduler will display this message when disabling # management, even if fencing is not enabled, so we can rely on it. 
managed = self.create_watch(["Delaying fencing operations"], 60) managed.setwatch() self._set_unmanaged(node) if not managed.lookforall(): self.logger.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*")) watch = self.create_watch(pats, 60, "ShutdownActivity") watch.setwatch() self.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self._set_managed(node) return self.failure("Couldn't shut down the cluster") self.debug("Bringing the cluster back up") ret = self.startall(None) time.sleep(5) # allow ping to update the CIB if not ret: self._set_managed(node) return self.failure("Couldn't restart the cluster") if self.local_badnews("ResourceActivity:", watch): self._set_managed(node) return self.failure("Resources stopped or started during cluster restart") watch = self.create_watch(pats, 60, "StartupActivity") watch.setwatch() # Re-enable resource management (and verify it happened). self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): return self.failure("Could not re-enable resource management") # Ignore actions for STONITH resources ignore = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.debug("Ignoring start actions for %s" % r.id) ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id)) if self.local_badnews("ResourceActivity:", watch, ignore): return self.failure("Resources stopped or started after resource management was re-enabled") return ret def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"resource( was|s were) active at shutdown", ] def is_applicable(self): return 1 AllTestClasses.append(Reattach) class SpecialTest1(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SpecialTest1" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. ''' self.incr("calls") # Shut down all the nodes... 
ret = self.stopall(None) if not ret: return self.failure("Could not stop all nodes") # Test config recovery when the other nodes come up self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") # Start the selected node ret = self.restart1(node) if not ret: return self.failure("Could not start "+node) # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Could not start the remaining nodes") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Errors that occur as a result of the CIB being wiped return [ r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", r"error.*: Resource start-up disabled since no STONITH resources have been defined", r"error.*: Either configure some or disable STONITH with the stonith-enabled option", r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity", ] AllTestClasses.append(SpecialTest1) class HAETest(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "HAETest" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_loop = 1 def setup(self, node): # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active = 0 (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.debug("Resource %s is still inactive" % resource) elif rc == 234: self.logger.log("Unknown resource %s" % resource) return 0 elif rc == 246: self.logger.log("Cluster is inactive") return 0 elif rc != 0: self.logger.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "controld" and r.parent != "NA": self.debug("Found dlm: %s" % self.r_dlm) self.r_dlm = r.parent return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None self.r_o2cb = None self.r_ocfs2 = [] if self.find_dlm(node): self.find_ocfs2_resources(node) def is_applicable(self): if not self.is_applicable_common(): return 0 if self.Env["Schema"] == "hae": return 1 return None class HAERoleTest(HAETest): def __init__(self, cm): '''Lars' mount/unmount test for the HA extension. 
''' HAETest.__init__(self,cm) self.name = "HAERoleTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) class HAEStandbyTest(HAETest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): HAETest.__init__(self,cm) self.name = "HAEStandbyTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) class NearQuorumPointTest(CTSTest): ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down Use a 50% probability for each of up/down. - figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test. 
''' self.incr("calls") startset = [] stopset = [] stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint") #decide what to do with each node for node in self.Env["nodes"]: action = self.Env.RandomGen.choice(["start","stop"]) #action = self.Env.RandomGen.choice(["start","stop","no change"]) if action == "start" : startset.append(node) elif action == "stop" : stopset.append(node) self.debug("start nodes:" + repr(startset)) self.debug("stop nodes:" + repr(stopset)) #add search patterns watchpats = [ ] for node in stopset: if self.CM.ShouldBeStatus[node] == "up": watchpats.append(self.templates["Pat:We_stopped"] % node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": #watchpats.append(self.templates["Pat:NonDC_started"] % node) watchpats.append(self.templates["Pat:Local_started"] % node) else: for stopping in stopset: if self.CM.ShouldBeStatus[stopping] == "up": watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping))) if len(watchpats) == 0: return self.skipped() if len(startset) != 0: watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() #begin actions for node in stopset: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) #get the result if watch.lookforall(): self.CM.cluster_stable() self.CM.fencing_cleanup("NearQuorumPoint", stonith) return self.success() self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched)) #get the "bad" nodes upnodes = [] for node in stopset: if self.CM.StataCM(node) == 1: upnodes.append(node) downnodes = [] for node in startset: if self.CM.StataCM(node) == 0: downnodes.append(node) self.CM.fencing_cleanup("NearQuorumPoint", stonith) if upnodes == [] and downnodes == []: self.CM.cluster_stable() # Make sure they're completely down with no residule for node in stopset: self.rsh(node, self.templates["StopCmd"]) return self.success() if len(upnodes) > 0: self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes)) if len(downnodes) > 0: self.logger.log("Warn: Unstartable nodes: " + repr(downnodes)) return self.failure() def is_applicable(self): return 1 AllTestClasses.append(NearQuorumPointTest) class RollingUpgradeTest(CTSTest): '''Perform a rolling upgrade of the cluster''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RollingUpgrade" self.start = StartTest(cm) self.stop = StopTest(cm) self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def setup(self, node): # Start all remaining nodes ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.Env["nodes"]: if not self.downgrade(node, None): return self.failure("Couldn't downgrade %s" % node) ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.Env["nodes"]: if not self.upgrade(node, None): return self.failure("Couldn't upgrade %s" % node) return self.success() def install(self, node, version, start=1, flags="--force"): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.Env["rpm-dir"], version) self.logger.log("Installing %s on %s with %s" % (version, node, flags)) if not self.stop(node): return self.failure("stop failure: "+node) rc = self.rsh(node, "mkdir -p %s" % target_dir) rc = 
self.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.Env["current-version"], start) def downgrade(self, node, start=1): return self.install(node, self.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not "rpm-dir" in list(self.Env.keys()): return None if not "current-version" in list(self.Env.keys()): return None if not "previous-version" in list(self.Env.keys()): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) class BSC_AddResource(CTSTest): '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "AddResource" self.resource_offset = 0 self.cib_cmd = """cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() ip = self.NextIP() if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.Env["DeadTime"]): return self.failure("Unstable cluster") return self.success() def NextIP(self): ip = self.Env["IPBase"] if ":" in ip: fields = ip.rpartition(":") fields[2] = str(hex(int(fields[2], 16)+1)) print(str(hex(int(f[2], 16)+1))) else: fields = ip.rpartition('.') fields[2] = str(int(fields[2])+1) ip = fields[0] + fields[1] + fields[3]; self.Env["IPBase"] = ip return ip.strip() def make_ip_resource(self, node, id, rclass, type, ip): self.logger.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint = """ """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.logger.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.logger.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.Env["DoBSC"]: return 1 return None AllTestClasses.append(BSC_AddResource) class SimulStopLite(CTSTest): '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStopLite" def __call__(self, dummy): '''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.debug("Setup: " + self.name) # We ignore the "node" parameter... 
watchpats = [ ] for node in self.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.incr("WasStarted") watchpats.append(self.templates["Pat:We_stopped"] % node) if len(watchpats) == 0: return self.success() # Stop all the nodes - at about the same time... watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.set_timer() for node in self.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) if watch.lookforall(): # Make sure they're completely down with no residue for node in self.Env["nodes"]: self.rsh(node, self.templates["StopCmd"]) return self.success() did_fail = 0 up_nodes = [] for node in self.Env["nodes"]: if self.CM.StataCM(node) == 1: did_fail = 1 up_nodes.append(node) if did_fail: return self.failure("Active nodes exist: " + repr(up_nodes)) self.logger.log("Warn: All nodes stopped but CTS didn't detect: " + repr(watch.unmatched)) return self.failure("Missing log message: "+repr(watch.unmatched)) def is_applicable(self): '''SimulStopLite is a setup test and never applicable''' return 0 class SimulStartLite(CTSTest): '''Start any stopped nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStartLite" def __call__(self, dummy): '''Perform the 'SimulStartLite' setup work. ''' self.incr("calls") self.debug("Setup: " + self.name) # We ignore the "node" parameter... node_list = [] for node in self.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.incr("WasStopped") node_list.append(node) self.set_timer() while len(node_list) > 0: # Repeat until all nodes come up watchpats = [ ] uppat = self.templates["Pat:NonDC_started"] if self.CM.upcount() == 0: uppat = self.templates["Pat:Local_started"] watchpats.append(self.templates["Pat:DC_IDLE"]) for node in node_list: watchpats.append(uppat % node) watchpats.append(self.templates["Pat:InfraUp"] % node) watchpats.append(self.templates["Pat:PacemakerUp"] % node) # Start all the nodes - at about the same time... 
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() stonith = self.CM.prepare_fencing_watcher(self.name) for node in node_list: self.CM.StartaCMnoBlock(node) watch.lookforall() node_list = self.CM.fencing_cleanup(self.name, stonith) if node_list == None: return self.failure("Cluster did not stabilize") # Remove node_list messages from watch.unmatched for node in node_list: self.logger.debug("Dealing with stonith operations for %s" % repr(node_list)) if watch.unmatched: try: watch.unmatched.remove(uppat % node) except: self.debug("Already matched: %s" % (uppat % node)) try: watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) except: self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) try: watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) except: self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Startup pattern not found: %s" %(regex)) if not self.CM.cluster_stable(): return self.failure("Cluster did not stabilize") did_fail = 0 unstable = [] for node in self.Env["nodes"]: if self.CM.StataCM(node) == 0: did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstarted nodes exist: " + repr(unstable)) unstable = [] for node in self.Env["nodes"]: if not self.CM.node_stable(node): did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstable cluster nodes exist: " + repr(unstable)) return self.success() def is_applicable(self): '''SimulStartLite is a setup test and never applicable''' return 0 def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.Audits = audits result.append(bound_test) return result class RemoteLXC(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteLXC" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.num_containers = 2 self.is_container = 1 self.is_docker_unsafe = 1 self.failed = 0 self.fail_string = "" def start_lxc_simple(self, node): # restore any artifacts laying around from a previous test. 
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") # generate the containers, put them in the config, add some resources to them pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) self.set_timer("remoteSimpleInit") watch.lookforall() self.log_timer("remoteSimpleInit") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 def cleanup_lxc_simple(self, node): pats = [ ] # if the test failed, attempt to clean up the cib and libvirt environment # as best as possible if self.failed == 1: # restore libvirt and cib self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") return watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") self.set_timer("remoteSimpleCleanup") watch.lookforall() self.log_timer("remoteSimpleCleanup") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 # cleanup libvirt self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") def __call__(self, node): '''Perform the 'RemoteLXC' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") if rc == 1: self.log("Environment test for lxc support failed.") return self.skipped() self.start_lxc_simple(node) self.cleanup_lxc_simple(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed == 1: return self.failure(self.fail_string) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for ping", r"schedulerd.*: Recover (ping|lxc-ms|container)\s*\(.*\)", # The orphaned lxc-ms resource causes an expected transition error # that is a result of the scheduler not having knowledge that the # promotable resource used to be a clone. As a result, it looks like that # resource is running in multiple locations when it shouldn't... But in # this instance we know why this error is occurring and that it is expected. r"Calculated [Tt]ransition .*pe-error", r"Resource lxc-ms .* is active on 2 nodes attempting recovery", r"Unknown operation: fail", r"VirtualDomain.*ERROR: Unable to determine emulator", ] AllTestClasses.append(RemoteLXC) class RemoteDriver(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = self.__class__.__name__ self.is_docker_unsafe = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.stop = StopTest(cm) self.remote_rsc = "remote-rsc" self.cib_cmd = """cibadmin -C -o %s -X '%s' """ self.reset() def reset(self): self.pcmk_started = 0 self.failed = False self.fail_string = "" self.remote_node_added = 0 self.remote_rsc_added = 0 self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False]) def fail(self, msg): """ Mark test as failed. 
""" self.failed = True # Always log the failure. self.logger.log(msg) # Use first failure as test status, as it's likely to be most useful. if not self.fail_string: self.fail_string = msg def get_othernode(self, node): for othernode in self.Env["nodes"]: if othernode == node: # we don't want to try and use the cib that we just shutdown. # find a cluster node that is not our soon to be remote-node. continue else: return othernode def del_rsc(self, node, rsc): othernode = self.get_othernode(node) rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) if rc != 0: self.fail("Removal of resource '%s' failed" % rsc) def add_rsc(self, node, rsc_xml): othernode = self.get_othernode(node) rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) if rc != 0: self.fail("resource creation failed") def add_primitive_rsc(self, node): rsc_xml = """ """ % { "node": self.remote_rsc } self.add_rsc(node, rsc_xml) if not self.failed: self.remote_rsc_added = 1 def add_connection_rsc(self, node): rsc_xml = """ """ % { "node": self.remote_node, "server": node } if self.remote_use_reconnect_interval: # Set reconnect interval on resource rsc_xml = rsc_xml + """ """ % (self.remote_node) rsc_xml = rsc_xml + """ """ % { "node": self.remote_node } self.add_rsc(node, rsc_xml) if not self.failed: self.remote_node_added = 1 def disable_services(self, node): self.corosync_enabled = self.Env.service_is_enabled(node, "corosync") if self.corosync_enabled: self.Env.disable_service(node, "corosync") self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker") if self.pacemaker_enabled: self.Env.disable_service(node, "pacemaker") def restore_services(self, node): if self.corosync_enabled: self.Env.enable_service(node, "corosync") if self.pacemaker_enabled: self.Env.enable_service(node, "pacemaker") def stop_pcmk_remote(self, node): # disable pcmk remote for i in range(10): rc = self.rsh(node, "service pacemaker_remote stop") if rc != 0: time.sleep(6) else: break def start_pcmk_remote(self, node): for i in range(10): rc = self.rsh(node, "service pacemaker_remote start") if rc != 0: time.sleep(6) else: self.pcmk_started = 1 break def freeze_pcmk_remote(self, node): """ Simulate a Pacemaker Remote daemon failure. """ # We freeze the process. self.rsh(node, "killall -STOP pacemaker-remoted") def resume_pcmk_remote(self, node): # We resume the process. self.rsh(node, "killall -CONT pacemaker-remoted") def start_metal(self, node): # Cluster nodes are reused as remote nodes in remote tests. If cluster # services were enabled at boot, in case the remote node got fenced, the # cluster node would join instead of the expected remote one. Meanwhile # pacemaker_remote would not be able to start. Depending on the chances, # the situations might not be able to be orchestrated gracefully any more. # # Temporarily disable any enabled cluster serivces. 
self.disable_services(node) pcmk_started = 0 # make sure the resource doesn't already exist for some reason self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc)) self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node)) if not self.stop(node): self.fail("Failed to shutdown cluster node %s" % node) return self.start_pcmk_remote(node) if self.pcmk_started == 0: self.fail("Failed to start pacemaker_remote on node %s" % node) return # Convert node to baremetal now that it has shutdown the cluster stack pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) self.add_connection_rsc(node) self.set_timer("remoteMetalInit") watch.lookforall() self.log_timer("remoteMetalInit") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) def migrate_connection(self, node): if self.failed: return pats = [ ] pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node)) pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(pats, 120) watch.setwatch() (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None) if rc != 0: self.fail("failed to move remote node connection resource") return self.set_timer("remoteMetalMigrate") watch.lookforall() self.log_timer("remoteMetalMigrate") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return def fail_rsc(self, node): if self.failed: return watchpats = [ ] watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node)) watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, 120) watch.setwatch() self.debug("causing dummy rsc to fail.") rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*") self.set_timer("remoteRscFail") watch.lookforall() self.log_timer("remoteRscFail") if watch.unmatched: self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) def fail_connection(self, node): if self.failed: return watchpats = [ ] watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node) watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node) watch = self.create_watch(watchpats, 120) watch.setwatch() # freeze the pcmk remote daemon. this will result in fencing self.debug("Force stopped active remote node") self.freeze_pcmk_remote(node) self.debug("Waiting for remote node to be fenced.") self.set_timer("remoteMetalFence") watch.lookforall() self.log_timer("remoteMetalFence") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return self.debug("Waiting for the remote node to come back up") self.CM.ns.WaitForNodeToComeUp(node, 120); pats = [ ] watch = self.create_watch(pats, 240) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) # start the remote node again watch it integrate back into cluster. 
self.start_pcmk_remote(node) if self.pcmk_started == 0: self.fail("Failed to start pacemaker_remote on node %s" % node) return self.debug("Waiting for remote node to rejoin cluster after being fenced.") self.set_timer("remoteMetalRestart") watch.lookforall() self.log_timer("remoteMetalRestart") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return def add_dummy_rsc(self, node): if self.failed: return # verify we can put a resource on the remote node pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) # Add a resource that must live on remote-node self.add_primitive_rsc(node) # force that rsc to prefer the remote node. (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None) if rc != 0: self.fail("Failed to place remote resource on remote node.") return self.set_timer("remoteMetalRsc") watch.lookforall() self.log_timer("remoteMetalRsc") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) def test_attributes(self, node): if self.failed: return # This verifies permanent attributes can be set on a remote-node. It also # verifies the remote-node can edit its own cib node section remotely. (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line)) return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to get remote-node attribute") return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to delete remote-node attribute") return def cleanup_metal(self, node): self.restore_services(node) if self.pcmk_started == 0: return pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc)) if self.remote_node_added == 1: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node)) self.set_timer("remoteMetalCleanup") self.resume_pcmk_remote(node) if self.remote_rsc_added == 1: # Remove dummy resource added for remote node tests self.debug("Cleaning up dummy rsc put on remote node") self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc) self.del_rsc(node, self.remote_rsc) if self.remote_node_added == 1: # Remove remote node's connection resource self.debug("Cleaning up remote node connection resource") self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node)) self.del_rsc(node, self.remote_node) watch.lookforall() self.log_timer("remoteMetalCleanup") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) self.stop_pcmk_remote(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.remote_node_added == 1: # Remove remote node itself self.debug("Cleaning up node entry for remote node") self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node) def setup_env(self, node): self.remote_node = "remote-%s" % (node) # we are assuming if all nodes have a key, that it is # the right key... If any node doesn't have a remote # key, we regenerate it everywhere. 
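# --- Illustrative aside, not part of the original test -----------------------
# The code just below generates a shared Pacemaker Remote authkey with dd from
# /dev/urandom and copies it to /etc/pacemaker/authkey on every node.  A
# pure-Python equivalent of only the key-generation step (the distribution and
# permission handling stay exactly as written below):
import os

def write_local_authkey(path, size=4096):
    # the same random key must end up on every node for remote connections
    with open(path, "wb") as keyfile:
        keyfile.write(os.urandom(size))
# ------------------------------------------------------------------------------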
if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]): return # create key locally (handle, keyfile) = tempfile.mkstemp(".cts") os.close(handle) devnull = open(os.devnull, 'wb') subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"], stdout=devnull, stderr=devnull) devnull.close() # sync key throughout the cluster for node in self.Env["nodes"]: self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker") self.rsh.cp(keyfile, "root@%s:/etc/pacemaker/authkey" % node) self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey") self.rsh(node, "chmod 0640 /etc/pacemaker/authkey") os.unlink(keyfile) def is_applicable(self): if not self.is_applicable_common(): return False for node in self.Env["nodes"]: rc = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1") if rc != 0: return False return True def start_new_test(self, node): self.incr("calls") self.reset() ret = self.startall(None) if not ret: return self.failure("setup failed: could not start all nodes") self.setup_env(node) self.start_metal(node) self.add_dummy_rsc(node) return True def __call__(self, node): return self.failure("This base class is not meant to be called directly.") def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"""is running on remote.*which isn't allowed""", r"""Connection terminated""", r"""Could not send remote""", ] # RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses class RemoteBasic(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteBaremetal' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.test_attributes(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteBasic) class RemoteStonithd(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteStonithd' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.fail_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def is_applicable(self): if not RemoteDriver.is_applicable(self): return False if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return True def errorstoignore(self): ignore_pats = [ r"Lost connection to Pacemaker Remote node", r"Software caused connection abort", r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor", r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*", r"schedulerd.*:\s+Recover remote-.*\s*\(.*\)", r"error: Result of monitor operation for .* on remote-.*: No executor connection", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats AllTestClasses.append(RemoteStonithd) class RemoteMigrate(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteMigrate' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.migrate_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteMigrate) class RemoteRscFailure(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteRscFailure' test. 
''' if not self.start_new_test(node): return self.failure(self.fail_string) # This is an important step. We are migrating the connection # before failing the resource. This verifies that the migration # has properly maintained control over the remote-node. self.migrate_connection(node) self.fail_rsc(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def errorstoignore(self): ignore_pats = [ r"schedulerd.*: Recover remote-rsc\s*\(.*\)", r"Dummy.*: No process state file found", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats AllTestClasses.append(RemoteRscFailure) # vim:ts=4:sw=4:et: diff --git a/cts/OCFIPraTest.py.in b/cts/OCFIPraTest.py.in index 81a5da8c01..aaf63af6d0 100644 --- a/cts/OCFIPraTest.py.in +++ b/cts/OCFIPraTest.py.in @@ -1,176 +1,173 @@ #!@PYTHON@ '''OCF IPaddr/IPaddr2 Resource Agent Test''' -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - __copyright__ = """Original Author: Huang Zhen Copyright 2004 International Business Machines -with later changes copyright 2005-2018 the Pacemaker project contributors. +with later changes copyright 2005-2020 the Pacemaker project contributors. The version control history for this file may have further details. """ __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import os import sys import time import random import struct import syslog from cts.CTSvars import * def usage(): print("usage: " + sys.argv[0] \ + " [-2]"\ + " [--ipbase|-i first-test-ip]"\ + " [--ipnum|-n test-ip-num]"\ + " [--help|-h]"\ + " [--perform|-p op]"\ + " [number-of-iterations]") sys.exit(1) def perform_op(ra, ip, op): os.environ["OCF_RA_VERSION_MAJOR"] = "1" os.environ["OCF_RA_VERSION_MINOR"] = "0" os.environ["OCF_ROOT"] = CTSvars.OCF_ROOT_DIR os.environ["OCF_RESOURCE_INSTANCE"] = ip os.environ["OCF_RESOURCE_TYPE"] = ra os.environ["OCF_RESKEY_ip"] = ip os.environ["HA_LOGFILE"] = "/dev/null" os.environ["HA_LOGFACILITY"] = "local7" path = CTSvars.OCF_ROOT_DIR + "/resource.d/heartbeat/" + ra return os.spawnvpe(os.P_WAIT, path, [ra, op], os.environ) def audit(ra, iplist, ipstatus, summary): passed = 1 for ip in iplist: ret = perform_op(ra, ip, "monitor") if ret != ipstatus[ip]: passed = 0 log("audit: status of %s should be %d but it is %d\t [failure]" % (ip,ipstatus[ip],ret)) ipstatus[ip] = ret summary["audit"]["called"] += 1; if passed : summary["audit"]["success"] += 1 else : summary["audit"]["failure"] += 1 def log(towrite): t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time())) logstr = t + " "+str(towrite) syslog.syslog(logstr) print(logstr) if __name__ == '__main__': ra = "IPaddr" ipbase = "127.0.0.10" ipnum = 1 itnum = 50 perform = None summary = { "start":{"called":0,"success":0,"failure":0}, "stop" :{"called":0,"success":0,"failure":0}, "audit":{"called":0,"success":0,"failure":0} } syslog.openlog(sys.argv[0], 0, syslog.LOG_LOCAL7) # Process arguments... 
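# For reference, the hand-rolled option loop below could be expressed with
# argparse roughly as follows (sketch only; option names mirror usage()):
#
#   import argparse
#   parser = argparse.ArgumentParser(description="OCF IPaddr/IPaddr2 RA test")
#   parser.add_argument("-2", dest="use_ipaddr2", action="store_true")
#   parser.add_argument("--ipbase", "-i", default="127.0.0.10")
#   parser.add_argument("--ipnum", "-n", type=int, default=1)
#   parser.add_argument("--perform", "-p", default=None)
#   parser.add_argument("iterations", nargs="?", type=int, default=50)
#   opts = parser.parse_args()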
skipthis = None args = sys.argv[1:] for i in range(0, len(args)) : if skipthis : skipthis = None continue elif args[i] == "-2" : ra = "IPaddr2" elif args[i] == "--ip" or args[i] == "-i" : skipthis = 1 ipbase = args[i+1] elif args[i] == "--ipnum" or args[i] == "-n" : skipthis = 1 ipnum = int(args[i+1]) elif args[i] == "--perform" or args[i] == "-p" : skipthis = 1 perform = args[i+1] elif args[i] == "--help" or args[i] == "-h" : usage() else: itnum = int(args[i]) log("Begin OCF IPaddr/IPaddr2 Test") # Generate the test ips iplist = [] ipstatus = {} fields = ipbase.split('.') for i in range(0, ipnum) : ip = '.'.join(fields) iplist.append(ip) ipstatus[ip] = perform_op(ra,ip,"monitor") fields[3] = str(int(fields[3])+1) log("Test ip:" + str(iplist)) # If the user asked to perform an operation if perform is not None: log("Perform operation %s" % perform) for ip in iplist: perform_op(ra, ip, perform) log("Done") sys.exit() log("RA Type:" + ra) log("Test Count:" + str(itnum)) # Prepare Random (binary mode so struct.unpack() gets bytes) f = open("/dev/urandom", "rb") seed = struct.unpack("BBB", f.read(3)) f.close() #seed=(123,321,231) rand = random.Random() rand.seed(seed[0]) log("Test Random Seed:" + str(seed)) # # Begin Tests log(">>>>>>>>>>>>>>>>>>>>>>>>") for i in range(0, itnum): ip = rand.choice(iplist) if ipstatus[ip] == 0: op = "stop" elif ipstatus[ip] == 7: op = "start" else : op = rand.choice(["start","stop"]) ret = perform_op(ra, ip, op) # update status if op == "start" and ret == 0: ipstatus[ip] = 0 elif op == "stop" and ret == 0: ipstatus[ip] = 7 else : ipstatus[ip] = 1 result = "" if ret == 0: result = "success" else : result = "failure" summary[op]["called"] += 1 summary[op][result] += 1 log( "%d:%s %s \t[%s]"%(i, op, ip, result)) audit(ra, iplist, ipstatus, summary) log("<<<<<<<<<<<<<<<<<<<<<<<<") log("start:\t" + str(summary["start"])) log("stop: \t" + str(summary["stop"])) log("audit:\t" + str(summary["audit"])) diff --git a/cts/cib_xml.py b/cts/cib_xml.py index 2c1e94cfec..bf50e0a380 100644 --- a/cts/cib_xml.py +++ b/cts/cib_xml.py @@ -1,323 +1,320 @@ """ CIB XML generator for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2008-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2008-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys from cts.CTSvars import * from cts.CIB import CibBase class XmlBase(CibBase): def __init__(self, Factory, tag, _id, **kwargs): CibBase.__init__(self, Factory, tag, _id, **kwargs) def show(self): text = '''<%s''' % self.tag if self.name: text += ''' id="%s"''' % (self.name) for k in list(self.kwargs.keys()): text += ''' %s="%s"''' % (k, self.kwargs[k]) if not self.children: text += '''/>''' return text text += '''>''' for c in self.children: text += c.show() text += '''</%s>''' % self.tag return text def _run(self, operation, xml, section="all", options=""): if self.name: label = self.name else: label = "<%s>" % self.tag self.Factory.debug("Writing out %s" % label) fixed = "HOME=/root CIB_file="+self.Factory.tmpfile fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml) rc = self.Factory.rsh(self.Factory.target, fixed) if rc != 0: self.Factory.log("Configure call failed: "+fixed) sys.exit(1) class InstanceAttributes(XmlBase): """ Create an <instance_attributes> section with name-value pairs """ def __init__(self, Factory, name,
attrs): XmlBase.__init__(self, Factory, "instance_attributes", name) # Create an for each attribute for (attr, value) in list(attrs.items()): self.add_child(XmlBase(Factory, "nvpair", "%s-%s" % (name, attr), name=attr, value=value)) class Node(XmlBase): """ Create a section with node attributes for one node """ def __init__(self, Factory, node_name, node_id, node_attrs): XmlBase.__init__(self, Factory, "node", node_id, uname=node_name) self.add_child(InstanceAttributes(Factory, "%s-1" % node_name, node_attrs)) class Nodes(XmlBase): """ Create a section """ def __init__(self, Factory): XmlBase.__init__(self, Factory, "nodes", None) def add_node(self, node_name, node_id, node_attrs): self.add_child(Node(self.Factory, node_name, node_id, node_attrs)) def commit(self): self._run("modify", self.show(), "configuration", "--allow-create") class FencingTopology(XmlBase): def __init__(self, Factory): XmlBase.__init__(self, Factory, "fencing-topology", None) def level(self, index, target, devices, target_attr=None, target_value=None): # Generate XML ID (sanitizing target-by-attribute levels) if target: xml_id = "cts-%s.%d" % (target, index) self.add_child(XmlBase(self.Factory, "fencing-level", xml_id, target=target, index=index, devices=devices)) else: xml_id = "%s-%s.%d" % (target_attr, target_value, index) child = XmlBase(self.Factory, "fencing-level", xml_id, index=index, devices=devices) child["target-attribute"]=target_attr child["target-value"]=target_value self.add_child(child) def commit(self): self._run("create", self.show(), "configuration", "--allow-create") class Option(XmlBase): def __init__(self, Factory, section="cib-bootstrap-options"): XmlBase.__init__(self, Factory, "cluster_property_set", section) def __setitem__(self, key, value): self.add_child(XmlBase(self.Factory, "nvpair", "cts-%s" % key, name=key, value=value)) def commit(self): self._run("modify", self.show(), "crm_config", "--allow-create") class OpDefaults(XmlBase): def __init__(self, Factory): XmlBase.__init__(self, Factory, "op_defaults", None) self.meta = XmlBase(self.Factory, "meta_attributes", "cts-op_defaults-meta") self.add_child(self.meta) def __setitem__(self, key, value): self.meta.add_child(XmlBase(self.Factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value)) def commit(self): self._run("modify", self.show(), "configuration", "--allow-create") class Alerts(XmlBase): def __init__(self, Factory): XmlBase.__init__(self, Factory, "alerts", None) self.alert_count = 0 def add_alert(self, path, recipient): self.alert_count = self.alert_count + 1 alert = XmlBase(self.Factory, "alert", "alert-%d" % self.alert_count, path=path) recipient1 = XmlBase(self.Factory, "recipient", "alert-%d-recipient-1" % self.alert_count, value=recipient) alert.add_child(recipient1) self.add_child(alert) def commit(self): self._run("modify", self.show(), "configuration", "--allow-create") class Expression(XmlBase): def __init__(self, Factory, name, attr, op, value=None): XmlBase.__init__(self, Factory, "expression", name, attribute=attr, operation=op) if value: self["value"] = value class Rule(XmlBase): def __init__(self, Factory, name, score, op="and", expr=None): XmlBase.__init__(self, Factory, "rule", "%s" % name) self["boolean-op"] = op self["score"] = score if expr: self.add_child(expr) class Resource(XmlBase): def __init__(self, Factory, name, rtype, standard, provider=None): XmlBase.__init__(self, Factory, "native", name) self.rtype = rtype self.standard = standard self.provider = provider self.op = [] self.meta = {} 
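# (The collections initialized here and just below map onto the CIB pieces
#  this class emits: self.op -> operations, self.meta -> meta_attributes,
#  self.param -> instance_attributes, self.scores -> location preferences,
#  self.needs -> ordering constraints, self.coloc -> colocation constraints.)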
self.param = {} self.scores = {} self.needs = {} self.coloc = {} if self.standard == "ocf" and not provider: self.provider = "heartbeat" elif self.standard == "lsb": self.provider = None def __setitem__(self, key, value): self.add_param(key, value) def add_op(self, name, interval, **kwargs): self.op.append( XmlBase(self.Factory, "op", "%s-%s" % (name, interval), name=name, interval=interval, **kwargs)) def add_param(self, name, value): self.param[name] = value def add_meta(self, name, value): self.meta[name] = value def prefer(self, node, score="INFINITY", rule=None): if not rule: rule = Rule(self.Factory, "prefer-%s-r" % node, score, expr=Expression(self.Factory, "prefer-%s-e" % node, "#uname", "eq", node)) self.scores[node] = rule def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs): kargs = kwargs.copy() kargs["kind"] = kind if then: kargs["first-action"] = "start" kargs["then-action"] = then if first: kargs["first-action"] = first self.needs[resource] = kargs def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs): kargs = kwargs.copy() kargs["score"] = score if role: kargs["rsc-role"] = role if withrole: kargs["with-rsc-role"] = withrole self.coloc[resource] = kargs def constraints(self): text = "" for k in list(self.scores.keys()): text += '''''' % (k, self.name) text += self.scores[k].show() text += '''''' for k in list(self.needs.keys()): text += '''''' for k in list(self.coloc.keys()): text += '''''' text += "" return text def show(self): text = '''''' if len(self.meta) > 0: text += '''''' % self.name for p in list(self.meta.keys()): text += '''''' % (self.name, p, p, self.meta[p]) text += '''''' if len(self.param) > 0: text += '''''' % self.name for p in list(self.param.keys()): text += '''''' % (self.name, p, p, self.param[p]) text += '''''' if len(self.op) > 0: text += '''''' for o in self.op: key = o.name o.name = "%s-%s" % (self.name, key) text += o.show() o.name = key text += '''''' text += '''''' return text def commit(self): self._run("create", self.show(), "resources") self._run("modify", self.constraints()) class Group(Resource): def __init__(self, Factory, name): Resource.__init__(self, Factory, name, None, None) self.tag = "group" def __setitem__(self, key, value): self.add_meta(key, value) def show(self): text = '''<%s id="%s">''' % (self.tag, self.name) if len(self.meta) > 0: text += '''''' % self.name for p in list(self.meta.keys()): text += '''''' % (self.name, p, p, self.meta[p]) text += '''''' for c in self.children: text += c.show() text += '''''' % self.tag return text class Clone(Group): def __init__(self, Factory, name, child=None): Group.__init__(self, Factory, name) self.tag = "clone" if child: self.add_child(child) def add_child(self, resource): if not self.children: self.children.append(resource) else: self.Factory.log("Clones can only have a single child. 
Ignoring %s" % resource.name) diff --git a/cts/cts-exec.in b/cts/cts-exec.in index 0bc6899ce1..3cd3d42d37 100644 --- a/cts/cts-exec.in +++ b/cts/cts-exec.in @@ -1,1265 +1,1245 @@ #!@PYTHON@ """ Regression tests for Pacemaker's pacemaker-execd """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2012-2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2012-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import stat import sys import subprocess import shlex import shutil import time # Where to find test binaries # Prefer the source tree if available BUILD_DIR = "@abs_top_builddir@" TEST_DIR = sys.path[0] SBIN_DIR = "@sbindir@" # File permissions for executable scripts we create EXECMODE = stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH # These values must be kept in sync with include/crm/crm.h class CrmExit(object): OK = 0 ERROR = 1 INVALID_PARAM = 2 UNIMPLEMENT_FEATURE = 3 INSUFFICIENT_PRIV = 4 NOT_INSTALLED = 5 NOT_CONFIGURED = 6 NOT_RUNNING = 7 USAGE = 64 DATAERR = 65 NOINPUT = 66 NOUSER = 67 NOHOST = 68 UNAVAILABLE = 69 SOFTWARE = 70 OSERR = 71 OSFILE = 72 CANTCREAT = 73 IOERR = 74 TEMPFAIL = 75 PROTOCOL = 76 NOPERM = 77 CONFIG = 78 FATAL = 100 PANIC = 101 DISCONNECT = 102 SOLO = 103 DIGEST = 104 NOSUCH = 105 QUORUM = 106 UNSAFE = 107 EXISTS = 108 MULTIPLE = 109 OLD = 110 TIMEOUT = 124 MAX = 255 def update_path(): """ Set the PATH environment variable appropriately for the tests """ new_path = os.environ['PATH'] if os.path.exists("%s/cts-exec.in" % TEST_DIR): print("Running tests from the source tree: %s (%s)" % (BUILD_DIR, TEST_DIR)) # For pacemaker-execd, cts-exec-helper, and pacemaker-remoted new_path = "%s/daemons/execd:%s" % (BUILD_DIR, new_path) new_path = "%s/tools:%s" % (BUILD_DIR, new_path) # For crm_resource # For pacemaker-fenced new_path = "%s/daemons/fenced:%s" % (BUILD_DIR, new_path) # For cts-support new_path = "%s/cts:%s" % (BUILD_DIR, new_path) else: print("Running tests from the install tree: @CRM_DAEMON_DIR@ (not %s)" % TEST_DIR) # For cts-exec-helper, cts-support, pacemaker-execd, pacemaker-fenced, # and pacemaker-remoted new_path = "@CRM_DAEMON_DIR@:%s" % (new_path) print('Using PATH="{}"'.format(new_path)) os.environ['PATH'] = new_path -def pipe_output(pipes, stdout=True, stderr=False): - """ Wrapper to get text output from pipes regardless of Python version """ - - output = "" - pipe_outputs = pipes.communicate() - if sys.version_info < (3,): - if stdout: - output = output + pipe_outputs[0] - if stderr: - output = output + pipe_outputs[1] - else: - if stdout: - output = output + pipe_outputs[0].decode(sys.stdout.encoding) - if stderr: - output = output + pipe_outputs[1].decode(sys.stderr.encoding) - return output - - def output_from_command(command): """ Run a command, and return its standard output. 
""" test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE) test.wait() - return pipe_output(test).split("\n") + output = test.communicate()[0].decode(sys.stdout.encoding) + return output.split("\n") class TestError(Exception): """ Base class for exceptions in this module """ pass class ExitCodeError(TestError): """ Exception raised when command exit status is unexpected """ def __init__(self, exit_code): self.exit_code = exit_code def __str__(self): return repr(self.exit_code) class OutputNotFoundError(TestError): """ Exception raised when command output does not contain wanted string """ def __init__(self, output): self.output = output def __str__(self): return repr(self.output) class OutputFoundError(TestError): """ Exception raised when command output contains unwanted string """ def __init__(self, output): self.output = output def __str__(self): return repr(self.output) class Test(object): """ Executor for a single pacemaker-execd regression test """ def __init__(self, name, description, verbose=0, tls=0, timeout=2, force_wait=0): self.name = name self.description = description self.cmds = [] self.logpath = "/tmp/pacemaker-execd-regression.log" if tls: self.daemon_location = "pacemaker-remoted" else: self.daemon_location = "pacemaker-execd" self.test_tool_location = "cts-exec-helper" self.verbose = verbose self.tls = tls self.timeout = timeout self.force_wait = force_wait self.result_txt = "" self.cmd_tool_output = "" self.result_exitcode = CrmExit.OK self.execd_process = None self.stonith_process = None self.executed = 0 def __new_cmd(self, cmd, args, exitcode, stdout_match="", no_wait=0, stdout_negative_match="", kill=None): """ Add a command to be executed as part of this test """ if self.verbose and cmd == self.test_tool_location: args = args + " -V " if (cmd == self.test_tool_location) and self.tls: args = args + " -S " self.cmds.append( { "cmd" : cmd, "kill" : kill, "args" : args, "expected_exitcode" : exitcode, "stdout_match" : stdout_match, "stdout_negative_match" : stdout_negative_match, "no_wait" : no_wait, "cmd_output" : "", } ) def start_environment(self): """ Prepare the host for running a test """ ### make sure we are in full control here ### cmd = shlex.split("killall -q -9 pacemaker-fenced lt-pacemaker-fenced pacemaker-execd lt-pacemaker-execd cts-exec-helper lt-cts-exec-helper pacemaker-remoted") test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() additional_args = "" if self.tls == 0: self.stonith_process = subprocess.Popen(shlex.split("pacemaker-fenced -s")) if self.verbose: additional_args = additional_args + " -V" self.execd_process = subprocess.Popen(shlex.split("%s %s -l %s" % (self.daemon_location, additional_args, self.logpath))) logfile = None init_time = time.time() update_time = init_time while True: time.sleep(0.1) if self.force_wait == 0 and logfile == None \ and os.path.exists(self.logpath): logfile = io.open(self.logpath, 'rt', encoding = "ISO-8859-1") if self.force_wait == 0 and logfile != None: for line in logfile.readlines(): if "successfully started" in line: return now = time.time() if self.timeout > 0 and (now - init_time) >= self.timeout: if self.force_wait == 0: print("\tDaemon %s doesn't seem to have been initialized within %fs." "\n\tConsider specifying a longer '--timeout' value." %(self.daemon_location, self.timeout)) return if self.verbose and (now - update_time) >= 5: print("Waiting for %s to be initialized: %fs ..." 
%(self.daemon_location, now - init_time)) update_time = now def clean_environment(self): """ Clean up the host after running a test """ if self.execd_process: self.execd_process.terminate() self.execd_process.wait() if self.verbose: print("Daemon output") logfile = io.open(self.logpath, 'rt', errors='replace') for line in logfile: print(line.strip().encode('utf-8', 'replace')) os.remove(self.logpath) if self.stonith_process: self.stonith_process.terminate() self.stonith_process.wait() self.execd_process = None self.stonith_process = None def add_sys_cmd(self, cmd, args): """ Add a simple command to be executed as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, "") def add_cmd_check_stdout(self, args, match, no_match=""): """ Add a command with expected output to be executed as part of this test """ self.__new_cmd(self.test_tool_location, args, CrmExit.OK, match, 0, no_match) def add_cmd(self, args): """ Add a cts-exec-helper command to be executed as part of this test """ self.__new_cmd(self.test_tool_location, args, CrmExit.OK, "") def add_cmd_and_kill(self, kill_proc, args): """ Add a cts-exec-helper command and system command to be executed as part of this test """ self.__new_cmd(self.test_tool_location, args, CrmExit.OK, "", kill=kill_proc) def add_expected_fail_cmd(self, args, exitcode=CrmExit.ERROR): """ Add a cts-exec-helper command to be executed as part of this test and expected to fail """ self.__new_cmd(self.test_tool_location, args, exitcode, "") def get_exitcode(self): """ Return the exit status of the last test execution """ return self.result_exitcode def print_result(self, filler): """ Print the result of the last test execution """ print("%s%s" % (filler, self.result_txt)) def run_cmd(self, args): """ Execute a command as part of this test """ cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print("\n\nRunning: "+" ".join(cmd)) test = subprocess.Popen(cmd, stdout=subprocess.PIPE) if args['kill']: if self.verbose: print("Also running: "+args['kill']) ### Typically, the kill argument is used to detect some sort of ### failure. Without yielding for a few seconds here, the process ### launched earlier that is listening for the failure may not have ### time to connect to pacemaker-execd. time.sleep(2) subprocess.Popen(shlex.split(args['kill'])) if args['no_wait'] == 0: test.wait() else: return CrmExit.OK - output = pipe_output(test) + output = test.communicate()[0].decode(sys.stdout.encoding) args['cmd_output'] = output if test.returncode != args['expected_exitcode']: raise ExitCodeError(test.returncode) if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: raise OutputNotFoundError(output) if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: raise OutputFoundError(output) def set_error(self, step, cmd): """ Record failure of this test """ msg = "FAILURE - '%s' failed at step %d. Command: %s %s" self.result_txt = msg % (self.name, step, cmd['cmd'], cmd['args']) self.result_exitcode = CrmExit.ERROR def run(self): """ Execute this test. 
""" res = 0 i = 1 if self.tls and self.name.count("stonith") != 0: self.result_txt = "SKIPPED - '%s' - disabled when testing pacemaker_remote" % (self.name) print(self.result_txt) return res self.start_environment() if self.verbose: print("\n--- START TEST - %s" % self.name) self.result_txt = "SUCCESS - '%s'" % (self.name) self.result_exitcode = CrmExit.OK for cmd in self.cmds: try: self.run_cmd(cmd) except ExitCodeError as e: print(cmd['cmd_output']) print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode'])) self.set_error(i, cmd); break except OutputNotFoundError as e: print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e)) self.set_error(i, cmd); break except OutputFoundError as e: print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e)) self.set_error(i, cmd); break if self.verbose: print(cmd['cmd_output'].strip()) print("Step %d SUCCESS" % (i)) i = i + 1 self.clean_environment() print(self.result_txt) if self.verbose: print("--- END TEST - %s\n" % self.name) self.executed = 1 return res class Tests(object): """ Collection of all pacemaker-execd regression tests """ def __init__(self, verbose=0, tls=0, timeout=2, force_wait=0): self.tests = [] self.verbose = verbose self.tls = tls self.timeout = timeout self.force_wait = force_wait self.rsc_classes = output_from_command("crm_resource --list-standards") self.rsc_classes = self.rsc_classes[:-1] # Strip trailing empty line self.installed_files = [] self.action_timeout = " -t 9000 " if self.tls: self.rsc_classes.remove("stonith") if "systemd" in self.rsc_classes: try: # This code doesn't need this import, but pacemaker-cts-dummyd # does, so ensure the dependency is available rather than cause # all systemd tests to fail. 
import systemd.daemon except ImportError: print("Python systemd bindings not found.") print("The tests for systemd class are not going to be run.") self.rsc_classes.remove("systemd") print("Testing resource classes", repr(self.rsc_classes)) self.common_cmds = { "ocf_reg_line" : "-c register_rsc -r ocf_test_rsc "+self.action_timeout+" -C ocf -P pacemaker -T Dummy", "ocf_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_unreg_line" : "-c unregister_rsc -r \"ocf_test_rsc\" "+self.action_timeout, "ocf_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_start_line" : "-c exec -r \"ocf_test_rsc\" -a \"start\" "+self.action_timeout, "ocf_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete\" ", "ocf_stop_line" : "-c exec -r \"ocf_test_rsc\" -a \"stop\" "+self.action_timeout, "ocf_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete\" ", "ocf_monitor_line" : '-c exec -r ocf_test_rsc -a monitor -i 2s ' + self.action_timeout, "ocf_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "ocf_cancel_line" : '-c cancel -r ocf_test_rsc -a monitor -i 2s ' + self.action_timeout, "ocf_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "systemd_reg_line" : "-c register_rsc -r systemd_test_rsc " + self.action_timeout + " -C systemd -T pacemaker-cts-dummyd@3", "systemd_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_unreg_line" : "-c unregister_rsc -r \"systemd_test_rsc\" "+self.action_timeout, "systemd_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_start_line" : "-c exec -r \"systemd_test_rsc\" -a \"start\" "+self.action_timeout, "systemd_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete\" ", "systemd_stop_line" : "-c exec -r \"systemd_test_rsc\" -a \"stop\" "+self.action_timeout, "systemd_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete\" ", "systemd_monitor_line" : '-c exec -r systemd_test_rsc -a monitor -i 2s ' + self.action_timeout, "systemd_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete\" -t 15000 ", "systemd_cancel_line" : '-c cancel -r systemd_test_rsc -a monitor -i 2s ' + self.action_timeout, "systemd_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "upstart_reg_line" : "-c register_rsc -r upstart_test_rsc "+self.action_timeout+" -C upstart -T pacemaker-cts-dummyd", "upstart_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_unreg_line" : "-c unregister_rsc -r \"upstart_test_rsc\" "+self.action_timeout, "upstart_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_start_line" : "-c exec -r \"upstart_test_rsc\" -a \"start\" "+self.action_timeout, "upstart_start_event" : "-l \"NEW_EVENT event_type:exec_complete 
rsc_id:upstart_test_rsc action:start rc:ok op_status:complete\" ", "upstart_stop_line" : "-c exec -r \"upstart_test_rsc\" -a \"stop\" "+self.action_timeout, "upstart_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete\" ", "upstart_monitor_line" : '-c exec -r upstart_test_rsc -a monitor -i 2s ' + self.action_timeout, "upstart_monitor_event" : '-l "NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete" -t 15000', "upstart_cancel_line" : '-c cancel -r upstart_test_rsc -a monitor -i 2s ' + self.action_timeout, "upstart_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "service_reg_line" : "-c register_rsc -r service_test_rsc "+self.action_timeout+" -C service -T LSBDummy", "service_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_unreg_line" : "-c unregister_rsc -r \"service_test_rsc\" "+self.action_timeout, "service_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_start_line" : "-c exec -r \"service_test_rsc\" -a \"start\" "+self.action_timeout, "service_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete\" ", "service_stop_line" : "-c exec -r \"service_test_rsc\" -a \"stop\" "+self.action_timeout, "service_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete\" ", "service_monitor_line" : '-c exec -r service_test_rsc -a monitor -i 2s ' + self.action_timeout, "service_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "service_cancel_line" : '-c cancel -r service_test_rsc -a monitor -i 2s ' + self.action_timeout, "service_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "lsb_reg_line" : "-c register_rsc -r lsb_test_rsc "+self.action_timeout+" -C lsb -T LSBDummy", "lsb_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\" ", "lsb_unreg_line" : "-c unregister_rsc -r \"lsb_test_rsc\" "+self.action_timeout, "lsb_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\"", "lsb_start_line" : "-c exec -r \"lsb_test_rsc\" -a \"start\" "+self.action_timeout, "lsb_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete\" ", "lsb_stop_line" : "-c exec -r \"lsb_test_rsc\" -a \"stop\" "+self.action_timeout, "lsb_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete\" ", "lsb_monitor_line" : '-c exec -r lsb_test_rsc -a status -i 2s ' + self.action_timeout, "lsb_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete\" "+self.action_timeout, "lsb_cancel_line" : '-c cancel -r lsb_test_rsc -a status -i 2s ' + self.action_timeout, "lsb_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled\" ", "stonith_reg_line" : "-c register_rsc -r stonith_test_rsc " + self.action_timeout + " -C stonith -P pacemaker -T fence_dummy", "stonith_reg_event" : 
"-l \"NEW_EVENT event_type:register rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\" ", "stonith_unreg_line" : "-c unregister_rsc -r \"stonith_test_rsc\" "+self.action_timeout, "stonith_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\"", "stonith_start_line" : '-c exec -r stonith_test_rsc -a start ' + self.action_timeout, "stonith_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete\" ", "stonith_stop_line" : "-c exec -r \"stonith_test_rsc\" -a \"stop\" "+self.action_timeout, "stonith_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete\" ", "stonith_monitor_line" : '-c exec -r stonith_test_rsc -a monitor -i 2s ' + self.action_timeout, "stonith_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, "stonith_cancel_line" : '-c cancel -r stonith_test_rsc -a monitor -i 2s ' + self.action_timeout, "stonith_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled\" ", } def new_test(self, name, description): """ Create a named test """ test = Test(name, description, self.verbose, self.tls, self.timeout, self.force_wait) self.tests.append(test) return test def setup_test_environment(self): """ Prepare the host before executing any tests """ os.system("service pacemaker_remote stop") self.cleanup_test_environment() if self.tls and not os.path.isfile("/etc/pacemaker/authkey"): print("Installing /etc/pacemaker/authkey ...") os.system("mkdir -p /etc/pacemaker") os.system("dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1") self.installed_files.append("/etc/pacemaker/authkey") # If we're in build directory, install agents if not already installed if os.path.exists("%s/cts/cts-exec.in" % BUILD_DIR): if not os.path.exists("@OCF_RA_DIR@/pacemaker"): # @TODO remember which components were created and remove them os.makedirs("@OCF_RA_DIR@/pacemaker", 0o755) for agent in ["Dummy", "Stateful", "ping"]: agent_source = "%s/extra/resources/%s" % (BUILD_DIR, agent) agent_dest = "@OCF_RA_DIR@/pacemaker/%s" % (agent) if not os.path.exists(agent_dest): print("Installing %s ..." % (agent_dest)) shutil.copyfile(agent_source, agent_dest) os.chmod(agent_dest, EXECMODE) self.installed_files.append(agent_dest) subprocess.call(["cts-support", "install"]) def cleanup_test_environment(self): """ Clean up the host after executing desired tests """ for installed_file in self.installed_files: print("Removing %s ..." 
% (installed_file)) os.remove(installed_file) subprocess.call(["cts-support", "uninstall"]) def build_generic_tests(self): """ Register tests that apply to all resource classes """ common_cmds = self.common_cmds ### register/unregister tests ### for rsc in self.rsc_classes: test = self.new_test("generic_registration_%s" % (rsc), "Simple resource registration test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### start/stop tests ### for rsc in self.rsc_classes: test = self.new_test("generic_start_stop_%s" % (rsc), "Simple start and stop test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor cancel test ### for rsc in self.rsc_classes: test = self.new_test("generic_monitor_cancel_%s" % (rsc), "Simple monitor cancel test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. 
### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor duplicate test ### for rsc in self.rsc_classes: test = self.new_test("generic_monitor_duplicate_%s" % (rsc), "Test creation and canceling of duplicate monitors for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) # Add the duplicate monitors test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) # verify we still get update events ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) # cancel the monitor, if the duplicate merged with the original, we should no longer see monitor updates test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### stop implies cancel test ### for rsc in self.rsc_classes: test = self.new_test("generic_stop_implies_cancel_%s" % (rsc), "Verify stopping a resource implies cancel of recurring ops for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) ### If this happens the monitor did not actually cancel correctly. 
### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)], CrmExit.TIMEOUT) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) def build_multi_rsc_tests(self): """ Register complex tests that involve managing multiple resouces of different types """ common_cmds = self.common_cmds # do not use service and systemd at the same time, it is the same resource. ### register start monitor stop unregister resources of each type at the same time. ### test = self.new_test("multi_rsc_start_stop_all", "Start, monitor, and stop resources of multiple types and classes") for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) for rsc in self.rsc_classes: ### If this fails, that means the monitor is not being rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) def build_negative_tests(self): """ Register tests related to how pacemaker-execd handles failures """ ### ocf start timeout test ### test = self.new_test("ocf_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" " + self.action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") # -t must be less than self.action_timeout test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -k \"op_sleep\" -v \"5\" -t 1000 -w") test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:Timed Out" ' + self.action_timeout) test.add_cmd("-c exec -r test_rsc -a stop " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### stonith start timeout test ### test = self.new_test("stonith_start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd('-c register_rsc -r test_rsc ' + '-C stonith -P pacemaker -T fence_dummy ' + self.action_timeout + '-l "NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete"') test.add_cmd('-c exec -r test_rsc -a start -k monitor_delay -v 30 ' + '-t 1000 -w') # -t must be less than self.action_timeout test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:OCF_TIMEOUT op_status:Timed Out" ' + self.action_timeout) test.add_cmd("-c exec -r test_rsc -a stop " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### stonith component fail ### 
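# All of the cts-exec-helper invocations in these tests follow the same
# shape: a command line plus a "-l" expectation describing the NEW_EVENT
# that should be observed.  A hypothetical helper (not part of this suite)
# that builds such expectation strings would look like:
#
#   def expect_event(rsc, action, rc="ok", status="complete"):
#       return ('-l "NEW_EVENT event_type:exec_complete rsc_id:%s '
#               'action:%s rc:%s op_status:%s" ' % (rsc, action, rc, status))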
common_cmds = self.common_cmds test = self.new_test("stonith_component_fail", "Kill stonith component after pacemaker-execd connects") test.add_cmd(common_cmds["stonith_reg_line"] + " " + common_cmds["stonith_reg_event"]) test.add_cmd(common_cmds["stonith_start_line"] + " " + common_cmds["stonith_start_event"]) test.add_cmd('-c exec -r stonith_test_rsc -a monitor -i 600s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete" ' + self.action_timeout) test.add_cmd_and_kill("killall -9 -q pacemaker-fenced lt-pacemaker-fenced", '-l "NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:error op_status:error" -t 15000') test.add_cmd(common_cmds["stonith_unreg_line"] + " " + common_cmds["stonith_unreg_event"]) ### monitor fail for ocf resources ### test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" " + self.action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self.action_timeout + '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self.action_timeout) test.add_cmd('-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' + self.action_timeout) test.add_cmd_and_kill("rm -f @localstatedir@/run/Dummy-test_rsc.state", '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete" ' + self.action_timeout) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s ' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" " + self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" " + self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### verify notify changes only for monitor operation. 
### test = self.new_test("monitor_changes_only", "Verify when flag is set, only monitor changes are notified.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+" -o " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self.action_timeout + ' -o -l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete" ') test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd_and_kill('rm -f @localstatedir@/run/Dummy-test_rsc.state', '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self.action_timeout) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd('-c unregister_rsc -r "test_rsc" ' + self.action_timeout + '-l "NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete"') ### monitor fail for systemd resource ### if "systemd" in self.rsc_classes: test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported..") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T pacemaker-cts-dummyd@3 " + self.action_timeout + "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) - test.add_cmd_and_kill("killall -9 -q pacemaker-cts-dummyd", + test.add_cmd_and_kill("pkill -9 -f pacemaker-cts-dummyd", '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self.action_timeout) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) 
test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### monitor fail for upstart resource ### if "upstart" in self.rsc_classes: test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported..") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T pacemaker-cts-dummyd "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd_and_kill('killall -9 -q dd', '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete"' + self.action_timeout) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Cancel non-existent operation on a resource ### test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r test_rsc -a monitor -i 1s ' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout) ### interval is wrong, should fail test.add_expected_fail_cmd('-c cancel -r test_rsc -a monitor -i 2s' + self.action_timeout + "-l 
\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") ### action name is wrong, should fail test.add_expected_fail_cmd('-c cancel -r test_rsc -a stop -i 1s' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Attempt to invoke non-existent rsc id ### test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.") test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:error op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r test_rsc -a stop "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_expected_fail_cmd('-c exec -r test_rsc -a monitor -i 6s ' + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c cancel -r test_rsc -a start "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, systemd ### if "systemd" in self.rsc_classes: test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T this_is_fake1234 "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") if "upstart" in self.rsc_classes: test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T this_is_fake1234 "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, ocf ### test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pacemaker -T this_is_fake1234 "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT 
event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with non-existent provider ### test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pancakes -T Dummy "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:Not installed\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with empty provider field ### test = self.new_test("start_ocf_no_provider", "Register ocf agent with a no provider, verify expected failure.") test.add_expected_fail_cmd("-c register_rsc -r \"test_rsc\" -C ocf -T Dummy "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") def build_stress_tests(self): """ Register stress tests """ timeout = "-t 20000" iterations = 25 test = self.new_test("ocf_stress", "Verify OCF agent handling works under load") for i in range(iterations): test.add_cmd("-c register_rsc -r rsc_%s %s -C ocf -P heartbeat -T Dummy -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd('-c exec -r rsc_%s -a monitor %s -i 1s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete"' % (i, timeout, i)) for i in range(iterations): test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) if "systemd" in self.rsc_classes: test = self.new_test("systemd_stress", "Verify systemd dbus connection works under load") for i in range(iterations): test.add_cmd("-c register_rsc -r rsc_%s %s -C systemd -T pacemaker-cts-dummyd@3 -l \"NEW_EVENT event_type:register rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c exec -r rsc_%s -a start %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:start rc:ok op_status:complete\"" % (i, timeout, i)) test.add_cmd('-c exec -r rsc_%s -a monitor %s -i 1s ' '-l "NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:monitor rc:ok op_status:complete"' % (i, timeout, i)) for i in range(iterations): test.add_cmd("-c exec -r rsc_%s -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:rsc_%s action:stop rc:ok 
op_status:complete\"" % (i, timeout, i)) test.add_cmd("-c unregister_rsc -r rsc_%s %s -l \"NEW_EVENT event_type:unregister rsc_id:rsc_%s action:none rc:ok op_status:complete\"" % (i, timeout, i)) iterations = 9 timeout = "-t 30000" ### Verify recurring op in-flight collision is handled in series properly test = self.new_test("rsc_inflight_collision", "Verify recurring ops do not collide with other operations for the same rsc.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -a start %s -k op_sleep -v 1 -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\"" % (timeout)) for i in range(iterations): test.add_cmd('-c exec -r test_rsc -a monitor %s -i 100%dms ' '-k op_sleep -v 2 ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"' % (timeout, i)) test.add_cmd("-c exec -r test_rsc -a stop %s -l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\"" % (timeout)) test.add_cmd("-c unregister_rsc -r test_rsc %s -l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\"" % (timeout)) def build_custom_tests(self): """ Register tests that target specific cases """ ### verify resource temporary folder is created and used by OCF agents. ### test = self.new_test("rsc_tmp_dir", "Verify creation and use of rsc temporary state directory") test.add_sys_cmd("ls", "-al @CRM_RSCTMP_DIR@") test.add_cmd("-c register_rsc -r test_rsc -P heartbeat -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -a start -t 4000") test.add_sys_cmd("ls", "-al @CRM_RSCTMP_DIR@") test.add_sys_cmd("ls", "@CRM_RSCTMP_DIR@/Dummy-test_rsc.state") test.add_cmd("-c exec -r test_rsc -a stop -t 4000") test.add_cmd("-c unregister_rsc -r test_rsc "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay then stop test ### test = self.new_test("start_delay", "Verify start delay works as expected.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -s 6000 -a start -w -t 6000") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 2000", CrmExit.TIMEOUT) test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 6000") test.add_cmd("-c exec -r test_rsc -a stop " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay, but cancel before it gets a chance to start. 
### test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" "+self.action_timeout) test.add_cmd("-c exec -r test_rsc -s 5000 -a start -w -t 4000") test.add_cmd("-c cancel -r test_rsc -a start " + self.action_timeout + "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 5000", CrmExit.TIMEOUT) test.add_cmd("-c unregister_rsc -r test_rsc " + self.action_timeout + "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register a bunch of resources, verify we can get info on them ### test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.") if "systemd" in self.rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C systemd -T pacemaker-cts-dummyd@3 "+self.action_timeout) test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ") if "upstart" in self.rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C upstart -T pacemaker-cts-dummyd "+self.action_timeout) test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout) test.add_cmd("-c get_rsc_info -r rsc2 ") test.add_cmd("-c unregister_rsc -r rsc2 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ") ### Register duplicate, verify only one entry exists and can still be removed. test = self.new_test("duplicate_registration", "Register resource multiple times, verify only one entry exists and can be removed.") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker "+self.action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Dummy") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Stateful -P pacemaker "+self.action_timeout) test.add_cmd_check_stdout("-c get_rsc_info -r rsc2 ", "id:rsc2 class:ocf provider:pacemaker type:Stateful") test.add_cmd("-c unregister_rsc -r rsc2 "+self.action_timeout) test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ") ### verify the option to only send notification to the original client. 
### test = self.new_test("notify_orig_client_only", "Verify option to only send notifications to the client originating the action.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd('-c exec -r \"test_rsc\" -a \"monitor\" -i 1s ' + self.action_timeout + ' -n ' '-l "NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete"') # this will fail because the monitor notifications should only go to the original caller, which no longer exists. test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" "+self.action_timeout, CrmExit.TIMEOUT) test.add_cmd('-c cancel -r test_rsc -a monitor -i 1s -t 6000 ') test.add_cmd("-c unregister_rsc -r \"test_rsc\" "+self.action_timeout+ "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### get metadata ### test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Dummy\"", "resource-agent name=\"Dummy\"") test.add_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Stateful\"") test.add_expected_fail_cmd("-c metadata -P \"pacemaker\" -T \"Stateful\"") test.add_expected_fail_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"fake_agent\"") ### get metadata ### test = self.new_test("get_lsb_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"lsb\" -T \"LSBDummy\"", "resource-agent name='LSBDummy'") ### get stonith metadata ### test = self.new_test("get_stonith_metadata", "Retrieve stonith metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy\"", "resource-agent name=\"fence_dummy\"") ### get metadata ### if "systemd" in self.rsc_classes: test = self.new_test("get_systemd_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"systemd\" -T \"pacemaker-cts-dummyd@\"", "resource-agent name=\"pacemaker-cts-dummyd@\"") ### get metadata ### if "upstart" in self.rsc_classes: test = self.new_test("get_upstart_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"upstart\" -T \"pacemaker-cts-dummyd\"", "resource-agent name=\"pacemaker-cts-dummyd\"") ### get ocf providers ### test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.") test.add_cmd_check_stdout("-c list_ocf_providers ", "pacemaker") test.add_cmd_check_stdout("-c list_ocf_providers -T ping", "pacemaker") ### Verify agents only exist in their lists ### test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.") test.add_cmd_check_stdout("-c list_agents ", "Stateful") ### ocf ### test.add_cmd_check_stdout("-c list_agents -C ocf", "Stateful") test.add_cmd_check_stdout("-c list_agents -C lsb", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "LSBDummy") ### init.d ### test.add_cmd_check_stdout("-c list_agents -C lsb", "LSBDummy") 
test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C ocf", "", "pacemaker-cts-dummyd@") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "pacemaker-cts-dummyd@") ### should not exist test.add_cmd_check_stdout("-c list_agents -C lsb", "", "fence_dummy") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "fence_dummy") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "fence_dummy") ### should not exist if "systemd" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "pacemaker-cts-dummyd@") ### systemd ### test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C systemd", "pacemaker-cts-dummyd@") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "fence_dummy") ### should not exist if "upstart" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "pacemaker-cts-dummyd") ### upstart ### test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C upstart", "pacemaker-cts-dummyd") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "fence_dummy") ### should not exist if "stonith" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents -C stonith", "fence_dummy") ### stonith ### test.add_cmd_check_stdout("-c list_agents -C stonith", "", "pacemaker-cts-dummyd@") ### should not exist test.add_cmd_check_stdout("-c list_agents -C stonith", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "fence_dummy") def print_list(self): """ List all registered tests """ print("\n==== %d TESTS FOUND ====" % (len(self.tests))) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self.tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def run_single(self, name): """ Run a single named test """ for test in self.tests: if test.name == name: test.run() break def run_tests_matching(self, pattern): """ Run all tests whose name matches a pattern """ for test in self.tests: if test.name.count(pattern) != 0: test.run() def run_tests(self): """ Run all tests """ for test in self.tests: test.run() def exit(self): """ Exit (with error status code if any test failed) """ for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: sys.exit(CrmExit.ERROR) sys.exit(CrmExit.OK) def print_results(self): """ Print summary of results of executed tests """ failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) class TestOptions(object): """ Option handler """ def __init__(self): self.options = {} self.options['list-tests'] = 0 self.options['run-all'] = 1 self.options['run-only'] = "" self.options['run-only-pattern'] = "" self.options['verbose'] = 0 self.options['timeout'] = 2 self.options['force-wait'] = 0 self.options['invalid-arg'] = "" self.options['show-usage'] 
= 0 self.options['pacemaker-remote'] = 0 def build_options(self, argv): """ Set options based on command-line arguments """ args = argv[1:] skip = 0 for i in range(0, len(args)): if skip: skip = 0 continue elif args[i] == "-h" or args[i] == "--help": self.options['show-usage'] = 1 elif args[i] == "-l" or args[i] == "--list-tests": self.options['list-tests'] = 1 elif args[i] == "-V" or args[i] == "--verbose": self.options['verbose'] = 1 elif args[i] == "-t" or args[i] == "--timeout": self.options['timeout'] = float(args[i+1]) elif args[i] == "-w" or args[i] == "--force-wait": self.options['force-wait'] = 1 elif args[i] == "-R" or args[i] == "--pacemaker-remote": self.options['pacemaker-remote'] = 1 elif args[i] == "-r" or args[i] == "--run-only": self.options['run-only'] = args[i+1] skip = 1 elif args[i] == "-p" or args[i] == "--run-only-pattern": self.options['run-only-pattern'] = args[i+1] skip = 1 def show_usage(self): """ Show command usage """ print("usage: " + sys.argv[0] + " [options]") print("If no options are provided, all tests will run") print("Options:") print("\t [--help | -h] Show usage") print("\t [--list-tests | -l] Print out all registered tests.") print("\t [--run-only | -r 'testname'] Run a specific test") print("\t [--verbose | -V] Verbose output") print("\t [--timeout | -t 'floating point number']" "\n\t\tUp to how many seconds each test case waits for the daemon to be initialized." "\n\t\tDefaults to 2. The value 0 means no limit.") print("\t [--force-wait | -w]" "\n\t\tEach test case waits the default/specified --timeout for the daemon without tracking the log.") print("\t [--pacemaker-remote | -R Test pacemaker-remoted binary instead of pacemaker-execd") print("\t [--run-only-pattern | -p 'string'] Run only tests containing the string value") print("\n\tExample: Run only the test 'start_stop'") print("\t\t " + sys.argv[0] + " --run-only start_stop") print("\n\tExample: Run only the tests with the string 'systemd' present in them") print("\t\t " + sys.argv[0] + " --run-only-pattern systemd") def main(argv): """ Run pacemaker-execd regression tests as specified by arguments """ update_path() opts = TestOptions() opts.build_options(argv) tests = Tests(opts.options['verbose'], opts.options['pacemaker-remote'], opts.options['timeout'], opts.options['force-wait']) tests.build_generic_tests() tests.build_multi_rsc_tests() tests.build_negative_tests() tests.build_custom_tests() tests.build_stress_tests() tests.setup_test_environment() print("Starting ...") if opts.options['list-tests']: tests.print_list() elif opts.options['show-usage']: opts.show_usage() elif opts.options['run-only-pattern'] != "": tests.run_tests_matching(opts.options['run-only-pattern']) tests.print_results() elif opts.options['run-only'] != "": tests.run_single(opts.options['run-only']) tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_test_environment() tests.exit() if __name__ == "__main__": main(sys.argv) diff --git a/cts/cts-fencing.in b/cts/cts-fencing.in index ced4cbe804..13b8518d28 100644 --- a/cts/cts-fencing.in +++ b/cts/cts-fencing.in @@ -1,1603 +1,1588 @@ #!@PYTHON@ """ Regression tests for Pacemaker's fencer """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2012-2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2012-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later 
(GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import subprocess import shlex import time import tempfile import signal # Where to find test binaries # Prefer the source tree if available BUILD_DIR = "@abs_top_builddir@" SCHEMA_DIR = "@CRM_SCHEMA_DIRECTORY@" TEST_DIR = sys.path[0] AUTOGEN_COROSYNC_TEMPLATE = """ totem { version: 2 cluster_name: cts-fencing crypto_cipher: none crypto_hash: none transport: udp } nodelist { node { nodeid: 1 name: %s ring0_addr: 127.0.0.1 } } logging { debug: off to_syslog: no to_stderr: no to_logfile: yes logfile: %s } """ # These values must be kept in sync with include/crm/crm.h class CrmExit(object): OK = 0 ERROR = 1 INVALID_PARAM = 2 UNIMPLEMENT_FEATURE = 3 INSUFFICIENT_PRIV = 4 NOT_INSTALLED = 5 NOT_CONFIGURED = 6 NOT_RUNNING = 7 USAGE = 64 DATAERR = 65 NOINPUT = 66 NOUSER = 67 NOHOST = 68 UNAVAILABLE = 69 SOFTWARE = 70 OSERR = 71 OSFILE = 72 CANTCREAT = 73 IOERR = 74 TEMPFAIL = 75 PROTOCOL = 76 NOPERM = 77 CONFIG = 78 FATAL = 100 PANIC = 101 DISCONNECT = 102 SOLO = 103 DIGEST = 104 NOSUCH = 105 QUORUM = 106 UNSAFE = 107 EXISTS = 108 MULTIPLE = 109 OLD = 110 TIMEOUT = 124 MAX = 255 def update_path(): """ Set the PATH environment variable appropriately for the tests """ new_path = os.environ['PATH'] if os.path.exists("%s/cts-fencing.in" % TEST_DIR): print("Running tests from the source tree: %s (%s)" % (BUILD_DIR, TEST_DIR)) # For pacemaker-fenced and cts-fence-helper new_path = "%s/daemons/fenced:%s" % (BUILD_DIR, new_path) new_path = "%s/tools:%s" % (BUILD_DIR, new_path) # For stonith_admin new_path = "%s/cts:%s" % (BUILD_DIR, new_path) # For cts-support else: print("Running tests from the install tree: @CRM_DAEMON_DIR@ (not %s)" % TEST_DIR) # For pacemaker-fenced, cts-fence-helper, and cts-support new_path = "@CRM_DAEMON_DIR@:%s" % (new_path) print('Using PATH="{}"'.format(new_path)) os.environ['PATH'] = new_path def find_validator(rng_file): if os.access("/usr/bin/xmllint", os.X_OK): return ["xmllint", "--relaxng", rng_file, "-"] else: return None def rng_directory(): if "PCMK_schema_directory" in os.environ: return os.environ["PCMK_schema_directory"] elif os.path.exists("%s/cts-fencing.in" % TEST_DIR): return "xml" else: return SCHEMA_DIR -def pipe_communicate(pipes, stdout=True, stderr=False, stdin=None): - """ Wrapper to get text output from pipes regardless of Python version """ - - output = "" +def pipe_communicate(pipes, stderr=False, stdin=None): + """ Get text output from pipes """ - if stdin: - if sys.version_info < (3,): - pipe_outputs = pipes.communicate(input=stdin) - else: - pipe_outputs = pipes.communicate(input=stdin.encode()) + if stdin is not None: + pipe_outputs = pipes.communicate(input=stdin.encode()) else: pipe_outputs = pipes.communicate() - if sys.version_info < (3,): - if stdout: - output = output + pipe_outputs[0] - if stderr: - output = output + pipe_outputs[1] - else: - if stdout: - output = output + pipe_outputs[0].decode(sys.stdout.encoding) - if stderr: - output = output + pipe_outputs[1].decode(sys.stderr.encoding) + output = pipe_outputs[0].decode(sys.stdout.encoding) + if stderr: + output = output + pipe_outputs[1].decode(sys.stderr.encoding) return output def output_from_command(command): """ Execute command and return its standard output """ test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE) test.wait() return pipe_communicate(test).split("\n") def localname(): """ Return the uname of the local host """ our_uname = output_from_command("uname -n") if our_uname: our_uname = 
our_uname[0] else: our_uname = "localhost" return our_uname def killall(process): """ Kill all instances of a process """ cmd = shlex.split("killall -9 -q %s" % process) test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() class TestError(Exception): """ Base class for exceptions in this module """ pass class ExitCodeError(TestError): """ Exception raised when command exit status is unexpected """ def __init__(self, exit_code): self.exit_code = exit_code def __str__(self): return repr(self.exit_code) class OutputNotFoundError(TestError): """ Exception raised when command output does not contain wanted string """ def __init__(self, output): self.output = output def __str__(self): return repr(self.output) class OutputFoundError(TestError): """ Exception raised when command output contains unwanted string """ def __init__(self, output): self.output = output def __str__(self): return repr(self.output) class XmlValidationError(TestError): """ Exception raised when xmllint fails """ def __init__(self, output): self.output = output def __str__(self): return repr(self.output) class Test(object): """ Executor for a single test """ def __init__(self, name, description, verbose=0, with_cpg=0, timeout=2, force_wait=0): self.name = name self.description = description self.cmds = [] self.verbose = verbose self.timeout = timeout self.force_wait = force_wait self.logpath = "/tmp/stonith-regression.log" self.result_txt = "" self.cmd_tool_output = "" self.result_exitcode = CrmExit.OK if with_cpg: self.stonith_options = "-c" self.enable_corosync = 1 else: self.stonith_options = "-s" self.enable_corosync = 0 self.stonith_process = None self.stonith_output = "" self.stonith_patterns = [] self.negative_stonith_patterns = [] self.executed = 0 def __new_cmd(self, cmd, args, exitcode, stdout_match="", no_wait=0, stdout_negative_match="", kill=None, validate=True): """ Add a command to be executed as part of this test """ self.cmds.append( { "cmd" : cmd, "kill" : kill, "args" : args, "expected_exitcode" : exitcode, "stdout_match" : stdout_match, "stdout_negative_match" : stdout_negative_match, "no_wait" : no_wait, "validate" : validate, } ) def start_environment(self): """ Prepare the host for executing a test """ # Make sure we are in full control killall("pacemakerd") killall("pacemaker-fenced") if self.verbose: self.stonith_options = self.stonith_options + " -V" print("Starting pacemaker-fenced with %s" % self.stonith_options) if os.path.exists(self.logpath): os.remove(self.logpath) cmd = "pacemaker-fenced %s -l %s" % (self.stonith_options, self.logpath) self.stonith_process = subprocess.Popen(shlex.split(cmd)) logfile = None init_time = time.time() update_time = init_time while True: time.sleep(0.1) if self.force_wait == 0 and logfile == None \ and os.path.exists(self.logpath): logfile = io.open(self.logpath, 'rt', encoding = "ISO-8859-1") if self.force_wait == 0 and logfile != None: for line in logfile.readlines(): if "successfully started" in line: return now = time.time() if self.timeout > 0 and (now - init_time) >= self.timeout: if self.force_wait == 0: print("\tDaemon pacemaker-fenced doesn't seem to have been initialized within %fs." "\n\tConsider specifying a longer '--timeout' value." %(self.timeout)) return if self.verbose and (now - update_time) >= 5: print("Waiting for pacemaker-fenced to be initialized: %fs ..." 
%(now - init_time)) update_time = now def clean_environment(self): """ Clean up the host after executing a test """ if self.stonith_process: if self.stonith_process.poll() == None: self.stonith_process.terminate() self.stonith_process.wait() else: return_code = { getattr(signal, _signame): _signame for _signame in dir(signal) if _signame.startswith('SIG') and not _signame.startswith("SIG_") }.get(-self.stonith_process.returncode, "RET=%d" % (self.stonith_process.returncode)) msg = "FAILURE - '%s' failed. pacemaker-fenced abnormally exited during test (%s)." self.result_txt = msg % (self.name, return_code) self.result_exitcode = CrmExit.ERROR self.stonith_output = "" self.stonith_process = None # the default for utf-8 encoding would error out if e.g. memory corruption # makes fenced output any kind of 8 bit value - while still interesting # for debugging and we'd still like the regression-test to go over the # full set of test-cases logfile = io.open(self.logpath, 'rt', encoding = "ISO-8859-1") for line in logfile.readlines(): self.stonith_output = self.stonith_output + line if self.verbose: print("Daemon Output Start") print(self.stonith_output) print("Daemon Output End") os.remove(self.logpath) def add_stonith_log_pattern(self, pattern): """ Add a log pattern to expect from this test """ self.stonith_patterns.append(pattern) def add_stonith_neg_log_pattern(self, pattern): """ Add a log pattern that should not occur with this test """ self.negative_stonith_patterns.append(pattern) def add_cmd(self, cmd, args, validate=True): """ Add a simple command to be executed as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, "", validate=validate) def add_cmd_no_wait(self, cmd, args): """ Add a simple command to be executed (without waiting) as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, "", 1) def add_cmd_check_stdout(self, cmd, args, match, no_match=""): """ Add a simple command with expected output to be executed as part of this test """ self.__new_cmd(cmd, args, CrmExit.OK, match, 0, no_match) def add_expected_fail_cmd(self, cmd, args, exitcode=CrmExit.ERROR): """ Add a command to be executed as part of this test and expected to fail """ self.__new_cmd(cmd, args, exitcode, "") def get_exitcode(self): """ Return the exit status of the last test execution """ return self.result_exitcode def print_result(self, filler): """ Print the result of the last test execution """ print("%s%s" % (filler, self.result_txt)) def run_cmd(self, args): """ Execute a command as part of this test """ cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print("\n\nRunning: "+" ".join(cmd)) test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if args['kill']: if self.verbose: print("Also running: "+args['kill']) subprocess.Popen(shlex.split(args['kill'])) if args['no_wait'] == 0: test.wait() else: return CrmExit.OK output = pipe_communicate(test, stderr=True) if self.verbose: print(output) if test.returncode != args['expected_exitcode']: raise ExitCodeError(test.returncode) if (args['stdout_match'] != "" and re.search(args['stdout_match'], output) is None): raise OutputNotFoundError(output) if (args['stdout_negative_match'] != "" and re.search(args['stdout_negative_match'], output) is not None): raise OutputFoundError(output) if args['validate']: rng_file = rng_directory() + "/api/api-result.rng" cmd = find_validator(rng_file) if not cmd: return if self.verbose: print("\nRunning: "+" ".join(cmd)) validator = subprocess.Popen(cmd, 
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = pipe_communicate(validator, stderr=True, stdin=output) if self.verbose: print(output) if validator.returncode != 0: raise XmlValidationError(output) def count_negative_matches(self, outline): """ Return 1 if a line matches patterns that shouldn't have occurred """ count = 0 for line in self.negative_stonith_patterns: if outline.count(line): count = 1 if self.verbose: print("This pattern should not have matched = '%s" % (line)) return count def match_stonith_patterns(self): """ Check test output for expected patterns """ negative_matches = 0 cur = 0 pats = self.stonith_patterns total_patterns = len(self.stonith_patterns) if len(self.stonith_patterns) == 0 and len(self.negative_stonith_patterns) == 0: return for line in self.stonith_output.split("\n"): negative_matches = negative_matches + self.count_negative_matches(line) if len(pats) == 0: continue cur = -1 for pat in pats: cur = cur + 1 if line.count(pats[cur]): del pats[cur] break if len(pats) > 0 or negative_matches: if self.verbose: for pat in pats: print("Pattern Not Matched = '%s'" % pat) msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." self.result_txt = msg % (self.name, len(pats), total_patterns, negative_matches) self.result_exitcode = CrmExit.ERROR def set_error(self, step, cmd): """ Record failure of this test """ msg = "FAILURE - '%s' failed at step %d. Command: %s %s" self.result_txt = msg % (self.name, step, cmd['cmd'], cmd['args']) self.result_exitcode = CrmExit.ERROR def run(self): """ Execute this test. """ res = 0 i = 1 self.start_environment() if self.verbose: print("\n--- START TEST - %s" % self.name) self.result_txt = "SUCCESS - '%s'" % (self.name) self.result_exitcode = CrmExit.OK for cmd in self.cmds: try: self.run_cmd(cmd) except ExitCodeError as e: print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode'])) self.set_error(i, cmd); break except OutputNotFoundError as e: print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e)) self.set_error(i, cmd); break except OutputFoundError as e: print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e)) self.set_error(i, cmd); break if self.verbose: print("Step %d SUCCESS" % (i)) i = i + 1 self.clean_environment() if self.result_exitcode == CrmExit.OK: self.match_stonith_patterns() print(self.result_txt) if self.verbose: print("--- END TEST - %s\n" % self.name) self.executed = 1 return res class Tests(object): """ Collection of all fencing regression tests """ def __init__(self, verbose=0, timeout=2, force_wait=0): self.tests = [] self.verbose = verbose self.timeout = timeout self.force_wait = force_wait self.autogen_corosync_cfg = not os.path.exists("/etc/corosync/corosync.conf") def new_test(self, name, description, with_cpg=0): """ Create a named test """ test = Test(name, description, self.verbose, with_cpg, self.timeout, self.force_wait) self.tests.append(test) return test def print_list(self): """ List all registered tests """ print("\n==== %d TESTS FOUND ====" % (len(self.tests))) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self.tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def start_corosync(self): """ Start the corosync process """ if self.verbose: print("Starting corosync") test = 
subprocess.Popen("corosync", stdout=subprocess.PIPE) test.wait() time.sleep(10) def run_single(self, name): """ Run a single named test """ for test in self.tests: if test.name == name: test.run() break def run_tests_matching(self, pattern): """ Run all tests whose name matches a pattern """ for test in self.tests: if test.name.count(pattern) != 0: test.run() def run_cpg_only(self): """ Run all corosync-enabled tests """ for test in self.tests: if test.enable_corosync: test.run() def run_no_cpg(self): """ Run all standalone tests """ for test in self.tests: if not test.enable_corosync: test.run() def run_tests(self): """ Run all tests """ for test in self.tests: test.run() def exit(self): """ Exit (with error status code if any test failed) """ for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: sys.exit(CrmExit.ERROR) sys.exit(CrmExit.OK) def print_results(self): """ Print summary of results of executed tests """ failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != CrmExit.OK: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) def build_api_sanity_tests(self): """ Register tests to verify basic API usage """ verbose_arg = "" if self.verbose: verbose_arg = "-V" test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.") test.add_cmd("cts-fence-helper", "-t %s" % (verbose_arg), validate=False) test = self.new_test("cpg_low_level_api_test", "Sanity test client api using mainloop and cpg.", 1) test.add_cmd("cts-fence-helper", "-m %s" % (verbose_arg), validate=False) def build_custom_timeout_tests(self): """ Register tests to verify custom timeout usage """ # custom timeout without topology test = self.new_test("cpg_custom_timeout_1", "Verify per device timeouts work as expected without using topology.", 1) test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') test.add_cmd('stonith_admin', '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4"') test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # timeout is 5+1+4 = 10 test.add_stonith_log_pattern("Total timeout set to 10") # custom timeout _WITH_ topology test = self.new_test("cpg_custom_timeout_2", "Verify per device timeouts work as expected _WITH_ topology.", 1) test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node1 node2 node3"') test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=1"') test.add_cmd('stonith_admin', '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node3" -o "pcmk_off_timeout=4000"') test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # timeout is 5+1+4000 = 4006 test.add_stonith_log_pattern("Total timeout set to 4006") def 
build_fence_merge_tests(self): """ Register tests to verify when fence operations should be merged """ ### Simple test that overlapping fencing operations get merged test = self.new_test("cpg_custom_merge_single", "Verify overlapping identical fencing operations are merged, no fencing levels used.", 1) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") ### one merger will happen test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") ### Test that multiple mergers occur test = self.new_test("cpg_custom_merge_multiple", "Verify multiple overlapping identical fencing operations are merged", 1) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"delay=2\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") ### 4 mergers should occur test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") ### Test that multiple mergers occur with topologies used test = self.new_test("cpg_custom_merge_with_topology", "Verify multiple overlapping identical fencing operations are merged with fencing levels.", 1) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") 
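# Topology for node3: level 1 contains both failing devices (false1, false2), level 2 the passing device (true1); the overlapping fencing requests issued below should still be merged.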
test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") ### 4 mergers should occur test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") test.add_stonith_log_pattern("Merging fencing action 'off' targeting node3 originating from client") ### the pattern below signifies that both the original and duplicate operation completed test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") test.add_stonith_log_pattern("Operation 'off' targeting node3 by ") def build_fence_no_merge_tests(self): """ Register tests to verify when fence operations should not be merged """ test = self.new_test("cpg_custom_no_merge", "Verify differing fencing operations are not merged", 1) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3 node2\" ") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true1") test.add_cmd_no_wait("stonith_admin", "--output-as=xml -F node2 -t 10") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 10") test.add_stonith_neg_log_pattern("Merging fencing action 'off' targeting node3 originating from client") def build_standalone_tests(self): """ Register a grab bag of tests that can be executed in standalone or corosync mode """ test_types = [ { "prefix" : "standalone", "use_cpg" : 0, }, { "prefix" : "cpg", "use_cpg" : 1, }, ] # test what happens when all devices timeout for test_type in test_types: test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"], "Verify that all devices timeout, a fencing failure is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") if test_type["use_cpg"] == 1: test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -F node3 -t 2", CrmExit.TIMEOUT) test.add_stonith_log_pattern("Total timeout set to 6") else: test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -F node3 -t 2", CrmExit.ERROR) 
test.add_stonith_log_pattern("targeting node3 using false1 returned ") test.add_stonith_log_pattern("targeting node3 using false2 returned ") test.add_stonith_log_pattern("targeting node3 using false3 returned ") # test what happens when multiple devices can fence a node, but the first device fails. for test_type in test_types: test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"], "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") if test_type["use_cpg"] == 1: test.add_stonith_log_pattern("Total timeout set to 15") # test what happens when we try to use a missing fence-agent. for test_type in test_types: test = self.new_test("%s_fence_missing_agent" % test_type["prefix"], "Verify proper error-handling when using a non-existent fence-agent.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_missing -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node2\"") test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -F node3 -t 5", CrmExit.ERROR) test.add_cmd("stonith_admin", "--output-as=xml -F node2 -t 5") # simple topology test for one device for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_simple" % test_type["prefix"], "Verify all fencing devices at a level are used.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") test.add_stonith_log_pattern("Total timeout set to 5") test.add_stonith_log_pattern("targeting node3 using true returned 0") # add topology, delete topology, verify fencing still works for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_add_remove" % test_type["prefix"], "Verify fencing occurrs after all topology levels are removed", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true") test.add_cmd("stonith_admin", "--output-as=xml -d node3 -i 1") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") test.add_stonith_log_pattern("Total timeout set to 5") test.add_stonith_log_pattern("targeting node3 using true returned 0") # test what happens when the first fencing level has multiple devices. 
for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_device_fails" % test_type["prefix"], "Verify if one device in a level fails, the other is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R false -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 20") test.add_stonith_log_pattern("Total timeout set to 40") test.add_stonith_log_pattern("targeting node3 using false returned -201") test.add_stonith_log_pattern("targeting node3 using true returned 0") # test what happens when the first fencing level fails. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], "Verify if one level fails, the next level is tried.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 3") test.add_stonith_log_pattern("Total timeout set to 18") test.add_stonith_log_pattern("targeting node3 using false1 returned -201") test.add_stonith_log_pattern("targeting node3 using false2 returned -201") test.add_stonith_log_pattern("targeting node3 using true3 returned 0") test.add_stonith_log_pattern("targeting node3 using true4 returned 0") # test what happens when the first fencing level has devices that no one has registered for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_missing_devices" % test_type["prefix"], "Verify topology can continue with missing devices.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a 
fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true4") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") # Test what happens if multiple fencing levels are defined, and then the first one is removed. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_level_removal" % test_type["prefix"], "Verify level removal works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v false2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 3 -v true4") # Now remove level 2, verify none of the devices in level two are hit. test.add_cmd("stonith_admin", "--output-as=xml -d node3 -i 2") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 20") test.add_stonith_log_pattern("Total timeout set to 8") test.add_stonith_log_pattern("targeting node3 using false1 returned -201") test.add_stonith_neg_log_pattern("targeting node3 using false2 returned ") test.add_stonith_log_pattern("targeting node3 using true3 returned 0") test.add_stonith_log_pattern("targeting node3 using true4 returned 0") # Test targeting a topology level by node name pattern. 
for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_level_pattern" % test_type["prefix"], "Verify targeting topology by node name pattern works.", test_type["use_cpg"]) test.add_cmd("stonith_admin", """--output-as=xml -R true -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node1 node2 node3" """) test.add_cmd("stonith_admin", """--output-as=xml -r '@node.*' -i 1 -v true""") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5") test.add_stonith_log_pattern("targeting node3 using true returned 0") # test allowing commas and semicolons as delimiters in pcmk_host_list for test_type in test_types: test = self.new_test("%s_host_list_delimiters" % test_type["prefix"], "Verify commas and semicolons can be used as pcmk_host_list delimiters", test_type["use_cpg"]) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node1,node2,node3" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=pcmk1;pcmk2;pcmk3" """) test.add_cmd("stonith_admin", "--output-as=xml -F node2 -t 5") test.add_cmd("stonith_admin", "--output-as=xml -F pcmk3 -t 5") test.add_stonith_log_pattern("targeting node2 using true1 returned 0") test.add_stonith_log_pattern("targeting pcmk3 using true2 returned 0") # test that the fencer builds the correct list of devices that can fence a node. for test_type in test_types: test = self.new_test("%s_list_devices" % test_type["prefix"], "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l node1 -V", "true2", "true1") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l node1 -V", "true3", "true1") # simple test of device monitor for test_type in test_types: test = self.new_test("%s_monitor" % test_type["prefix"], "Verify device is reachable", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -Q true1") test.add_cmd("stonith_admin", "--output-as=xml -Q false1") test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -Q true2", CrmExit.ERROR) # Verify monitor occurs for duration of timeout period on failure for test_type in test_types: test = self.new_test("%s_monitor_timeout" % test_type["prefix"], "Verify monitor uses duration of timeout period given.", test_type["use_cpg"]) test.add_cmd("stonith_admin", '--output-as=xml -R true1 -a fence_dummy -o "mode=fail" -o "monitor_mode=fail" -o "pcmk_host_list=node3"') test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -Q true1 -t 5", CrmExit.ERROR) test.add_stonith_log_pattern("Attempt 2 to execute") # Verify monitor occurs for duration of timeout period on failure, but stops at max retries for test_type in test_types: test = self.new_test("%s_monitor_timeout_max_retries" % test_type["prefix"], "Verify monitor retries until
max retry value or timeout is hit.", test_type["use_cpg"]) test.add_cmd("stonith_admin", '--output-as=xml -R true1 -a fence_dummy -o "mode=fail" -o "monitor_mode=fail" -o "pcmk_host_list=node3"') test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -Q true1 -t 15", CrmExit.ERROR) test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (list) the maximum number of times") # simple register test for test_type in test_types: test = self.new_test("%s_register" % test_type["prefix"], "Verify devices can be registered and un-registered", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -Q true1") test.add_cmd("stonith_admin", "--output-as=xml -D true1") test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -Q true1", CrmExit.ERROR) # simple reboot test for test_type in test_types: test = self.new_test("%s_reboot" % test_type["prefix"], "Verify devices can be rebooted", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -B node3 -t 5") test.add_cmd("stonith_admin", "--output-as=xml -D true1") test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -Q true1", CrmExit.ERROR) # test fencing history. for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_fence_history" % test_type["prefix"], "Verify last fencing operation is returned.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") test.add_cmd("stonith_admin", "--output-as=xml -F node3 -t 5 -V") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -H node3", 'action="off" target="node3" .* status="success"') # simple test of dynamic list query for test_type in test_types: test = self.new_test("%s_dynamic_list_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l fake_port_1", 'count="3"') # fence using dynamic list query for test_type in test_types: test = self.new_test("%s_fence_dynamic_list_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o mode=pass -o mock_dynamic_hosts=fake_port_1") test.add_cmd("stonith_admin", "--output-as=xml -F fake_port_1 -t 5 -V") # simple test of query using status action for test_type in test_types: test = self.new_test("%s_status_query" % test_type["prefix"], "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o 
\"pcmk_host_check=status\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") test.add_cmd_check_stdout("stonith_admin", "--output-as=xml -l fake_port_1", 'count="3"') # test what happens when no reboot action is advertised for test_type in test_types: test = self.new_test("%s_no_reboot_support" % test_type["prefix"], "Verify reboot action defaults to off when no reboot action is advertised by agent.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy_no_reboot -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -B node1 -t 5 -V") test.add_stonith_log_pattern("does not advertise support for 'reboot', performing 'off'") test.add_stonith_log_pattern("using true1 returned 0 (OK)") # make sure reboot is used when reboot action is advertised for test_type in test_types: test = self.new_test("%s_with_reboot_support" % test_type["prefix"], "Verify reboot action can be used when metadata advertises it.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -B node1 -t 5 -V") test.add_stonith_neg_log_pattern("does not advertise support for 'reboot', performing 'off'") test.add_stonith_log_pattern("using true1 returned 0 (OK)") # make sure requested fencing delay is applied only for the first device in the first level # make sure static delay from pcmk_delay_base is added for test_type in test_types: if test_type["use_cpg"] == 0: continue test = self.new_test("%s_topology_delay" % test_type["prefix"], "Verify requested fencing delay is applied only for the first device in the first level and pcmk_delay_base is added.", test_type["use_cpg"]) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\" -o \"pcmk_delay_base=1\"") test.add_cmd("stonith_admin", "--output-as=xml -R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\" -o \"pcmk_delay_base=1\"") test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -r node3 -i 2 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -F node3 --delay 1") test.add_stonith_log_pattern("Delaying 'off' action targeting node3 using true1 for 2s | timeout=120s requested_delay=1s base=1s max=1s") test.add_stonith_log_pattern("Delaying 'off' action targeting node3 using false1 for 1s | timeout=120s requested_delay=0s base=1s max=1s") test.add_stonith_neg_log_pattern("Delaying 'off' action targeting node3 using true2") test.add_stonith_neg_log_pattern("Delaying 'off' action targeting node3 using true3") def build_nodeid_tests(self): """ Register tests that use a corosync node id """ our_uname = localname() ### verify nodeid is supplied when nodeid is in the metadata parameters test = 
self.new_test("cpg_supply_nodeid", "Verify nodeid is given when fence agent has nodeid as parameter", 1) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -F %s -t 3" % (our_uname)) test.add_stonith_log_pattern("as nodeid with fence action 'off' targeting %s" % (our_uname)) ### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters test = self.new_test("cpg_do_not_supply_nodeid", "Verify nodeid is _NOT_ given when fence agent does not have nodeid as parameter", 1) # use a host name that won't be in corosync.conf test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=regr-test\"") test.add_cmd("stonith_admin", "--output-as=xml -F regr-test -t 3") test.add_stonith_neg_log_pattern("as nodeid with fence action 'off' targeting regr-test") ### verify nodeid use doesn't explode standalone mode test = self.new_test("standalone_do_not_supply_nodeid", "Verify nodeid in metadata parameter list doesn't kill standalone mode", 0) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -F %s -t 3" % (our_uname)) test.add_stonith_neg_log_pattern("as nodeid with fence action 'off' targeting %s" % (our_uname)) def build_unfence_tests(self): """ Register tests that verify unfencing """ our_uname = localname() ### verify unfencing using automatic unfencing test = self.new_test("cpg_unfence_required_1", "Verify require unfencing on all devices when automatic=true in agent's metadata", 1) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) # both devices should be executed test.add_stonith_log_pattern("using true1 returned 0 (OK)") test.add_stonith_log_pattern("using true2 returned 0 (OK)") ### verify unfencing using automatic unfencing fails if any of the required agents fail test = self.new_test("cpg_unfence_required_2", "Verify require unfencing on all devices when automatic=true in agent's metadata", 1) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=fail" -o "pcmk_host_list=%s"' % (our_uname)) test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -U %s -t 6" % (our_uname), CrmExit.ERROR) ### verify unfencing using automatic devices with topology test = self.new_test("cpg_unfence_required_3", "Verify require unfencing on all devices even when at different topology levels", 1) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) 
test.add_stonith_log_pattern("using true1 returned 0 (OK)") test.add_stonith_log_pattern("using true2 returned 0 (OK)") ### verify unfencing using automatic devices with topology test = self.new_test("cpg_unfence_required_4", "Verify all required devices are executed even with topology levels fail.", 1) test.add_cmd('stonith_admin', '--output-as=xml -R true1 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true2 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true3 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R true4 -a fence_dummy_auto_unfence -o "mode=pass" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false3 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd('stonith_admin', '--output-as=xml -R false4 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=%s node3"' % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v false1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v false2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v false3" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true3" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 3 -v false4" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 4 -v true4" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("using true1 returned 0 (OK)") test.add_stonith_log_pattern("using true2 returned 0 (OK)") test.add_stonith_log_pattern("using true3 returned 0 (OK)") test.add_stonith_log_pattern("using true4 returned 0 (OK)") def build_unfence_on_target_tests(self): """ Register tests that verify unfencing that runs on the target """ our_uname = localname() ### verify unfencing using on_target device test = self.new_test("cpg_unfence_on_target_1", "Verify unfencing with on_target = true", 1) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("(on) to be executed on target") ### verify failure of unfencing using on_target device test = self.new_test("cpg_unfence_on_target_2", "Verify failure unfencing with on_target = true", 1) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake_1234\"" % (our_uname)) test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -U node_fake_1234 -t 3", CrmExit.ERROR) test.add_stonith_log_pattern("(on) to be executed on target") ### verify unfencing using on_target device with topology test = self.new_test("cpg_unfence_on_target_3", "Verify unfencing with on_target = true using topology", 
1) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 1 -v true1" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r %s -i 2 -v true2" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -U %s -t 3" % (our_uname)) test.add_stonith_log_pattern("(on) to be executed on target") ### verify unfencing using on_target device with topology fails when victim node doesn't exist test = self.new_test("cpg_unfence_on_target_4", "Verify unfencing failure with on_target = true using topology", 1) test.add_cmd("stonith_admin", "--output-as=xml -R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 2 -v true2") test.add_expected_fail_cmd("stonith_admin", "--output-as=xml -U node_fake -t 3", CrmExit.ERROR) test.add_stonith_log_pattern("(on) to be executed on target") def build_remap_tests(self): """ Register tests that verify remapping of reboots to off-on """ test = self.new_test("cpg_remap_simple", "Verify sequential topology reboot is remapped to all-off-then-all-on", 1) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ """-o "pcmk_off_timeout=1" -o "pcmk_reboot_timeout=10" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ """-o "pcmk_off_timeout=2" -o "pcmk_reboot_timeout=20" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) test.add_stonith_log_pattern("Total timeout set to 3 for peer's fencing targeting node_fake") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") # fence_dummy sets "on" as an on_target action test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) targeting node_fake") test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) targeting node_fake") test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") test = self.new_test("cpg_remap_automatic", "Verify remapped topology reboot skips automatic 'on'", 1) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy_auto_unfence """ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy_auto_unfence """ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") test.add_stonith_log_pattern("Remapping multiple-device reboot 
targeting node_fake") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") test.add_stonith_neg_log_pattern("perform 'on' action targeting node_fake using") test.add_stonith_neg_log_pattern("'on' failure") test = self.new_test("cpg_remap_complex_1", "Verify remapped topology reboot in second level works if non-remapped first level fails", 1) test.add_cmd("stonith_admin", """--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 2 -v true1 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") test.add_stonith_log_pattern("perform 'reboot' action targeting node_fake using false1") test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true2") test.add_stonith_log_pattern("Remapped 'off' targeting node_fake complete, remapping to 'on'") test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) targeting node_fake") test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) targeting node_fake") test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") test = self.new_test("cpg_remap_complex_2", "Verify remapped topology reboot failure in second level proceeds to third level", 1) test.add_cmd("stonith_admin", """--output-as=xml -R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", """--output-as=xml -R true3 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 1 -v false1") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 2 -v true1 -v false2 -v true3") test.add_cmd("stonith_admin", "--output-as=xml -r node_fake -i 3 -v true2") test.add_cmd("stonith_admin", "--output-as=xml -B node_fake -t 5") test.add_stonith_log_pattern("perform 'reboot' action targeting node_fake using false1") test.add_stonith_log_pattern("Remapping multiple-device reboot targeting node_fake") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using true1") test.add_stonith_log_pattern("perform 'off' action targeting node_fake using false2") test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (off) the maximum number of times") test.add_stonith_log_pattern("Undoing remap of reboot targeting node_fake") 
test.add_stonith_log_pattern("perform 'reboot' action targeting node_fake using true2") test.add_stonith_neg_log_pattern("node_fake with true3") def setup_environment(self, use_corosync): """ Prepare the host before executing any tests """ if use_corosync: if self.autogen_corosync_cfg: (handle, self.autogen_corosync_log) = tempfile.mkstemp(prefix="cts-fencing-", suffix=".corosync.log") os.close(handle) corosync_cfg = io.open("/etc/corosync/corosync.conf", "w") corosync_cfg.write(AUTOGEN_COROSYNC_TEMPLATE % (localname(), self.autogen_corosync_log)) corosync_cfg.close() ### make sure we are in control ### killall("corosync") self.start_corosync() subprocess.call(["cts-support", "install"]) def cleanup_environment(self, use_corosync): """ Clean up the host after executing desired tests """ if use_corosync: killall("corosync") if self.autogen_corosync_cfg: if self.verbose: print("Corosync output") logfile = io.open(self.autogen_corosync_log, 'rt') for line in logfile.readlines(): print(line.strip()) logfile.close() os.remove(self.autogen_corosync_log) os.remove("/etc/corosync/corosync.conf") subprocess.call(["cts-support", "uninstall"]) class TestOptions(object): """ Option handler """ def __init__(self): self.options = {} self.options['list-tests'] = 0 self.options['run-all'] = 1 self.options['run-only'] = "" self.options['run-only-pattern'] = "" self.options['verbose'] = 0 self.options['timeout'] = 2 self.options['force-wait'] = 0 self.options['invalid-arg'] = "" self.options['cpg-only'] = 0 self.options['no-cpg'] = 0 self.options['show-usage'] = 0 def build_options(self, argv): """ Set options based on command-line arguments """ args = argv[1:] skip = 0 for i in range(0, len(args)): if skip: skip = 0 continue elif args[i] == "-h" or args[i] == "--help": self.options['show-usage'] = 1 elif args[i] == "-l" or args[i] == "--list-tests": self.options['list-tests'] = 1 elif args[i] == "-V" or args[i] == "--verbose": self.options['verbose'] = 1 elif args[i] == "-t" or args[i] == "--timeout": self.options['timeout'] = float(args[i+1]) elif args[i] == "-w" or args[i] == "--force-wait": self.options['force-wait'] = 1 elif args[i] == "-n" or args[i] == "--no-cpg": self.options['no-cpg'] = 1 elif args[i] == "-c" or args[i] == "--cpg-only": self.options['cpg-only'] = 1 elif args[i] == "-r" or args[i] == "--run-only": self.options['run-only'] = args[i+1] skip = 1 elif args[i] == "-p" or args[i] == "--run-only-pattern": self.options['run-only-pattern'] = args[i+1] skip = 1 def show_usage(self): """ Show command usage """ print("usage: " + sys.argv[0] + " [options]") print("If no options are provided, all tests will run") print("Options:") print("\t [--help | -h] Show usage") print("\t [--list-tests | -l] Print out all registered tests.") print("\t [--cpg-only | -c] Only run tests that require corosync.") print("\t [--no-cpg | -n] Only run tests that do not require corosync") print("\t [--run-only | -r 'testname'] Run a specific test") print("\t [--verbose | -V] Verbose output") print("\t [--timeout | -t 'floating point number']" "\n\t\tUp to how many seconds each test case waits for the daemon to be initialized." "\n\t\tDefaults to 2. 
The value 0 means no limit.") print("\t [--force-wait | -w]" "\n\t\tEach test case waits the default/specified --timeout for the daemon without tracking the log.") print("\t [--run-only-pattern | -p 'string'] Run only tests containing the string value") print("\n\tExample: Run only the test 'start_stop'") print("\t\t " + sys.argv[0] + " --run-only start_stop") print("\n\tExample: Run only the tests with the string 'systemd' present in them") print("\t\t " + sys.argv[0] + " --run-only-pattern systemd") def main(argv): """ Run fencing regression tests as specified by arguments """ update_path() opts = TestOptions() opts.build_options(argv) use_corosync = 1 tests = Tests(opts.options['verbose'], opts.options['timeout'], opts.options['force-wait']) tests.build_standalone_tests() tests.build_custom_timeout_tests() tests.build_api_sanity_tests() tests.build_fence_merge_tests() tests.build_fence_no_merge_tests() tests.build_unfence_tests() tests.build_unfence_on_target_tests() tests.build_nodeid_tests() tests.build_remap_tests() if opts.options['list-tests']: tests.print_list() sys.exit(CrmExit.OK) elif opts.options['show-usage']: opts.show_usage() sys.exit(CrmExit.OK) print("Starting ...") if opts.options['no-cpg']: use_corosync = 0 tests.setup_environment(use_corosync) if opts.options['run-only-pattern'] != "": tests.run_tests_matching(opts.options['run-only-pattern']) tests.print_results() elif opts.options['run-only'] != "": tests.run_single(opts.options['run-only']) tests.print_results() elif opts.options['no-cpg']: tests.run_no_cpg() tests.print_results() elif opts.options['cpg-only']: tests.run_cpg_only() tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_environment(use_corosync) tests.exit() if __name__ == "__main__": main(sys.argv) diff --git a/cts/cts-log-watcher.in b/cts/cts-log-watcher.in index 28f4efe7f1..cee9c948a4 100644 --- a/cts/cts-log-watcher.in +++ b/cts/cts-log-watcher.in @@ -1,87 +1,84 @@ #!@PYTHON@ """ Remote log reader for Pacemaker's Cluster Test Suite (CTS) Reads a specified number of lines from the supplied offset Returns the current offset Contains logic for handling truncation """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2014-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys import os import fcntl if __name__ == '__main__': limit = 0 offset = 0 prefix = '' filename = '/var/log/messages' skipthis=None args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == '-l' or args[i] == '--limit': skipthis=1 limit = int(args[i+1]) elif args[i] == '-f' or args[i] == '--filename': skipthis=1 filename = args[i+1] elif args[i] == '-o' or args[i] == '--offset': skipthis=1 offset = args[i+1] elif args[i] == '-p' or args[i] == '--prefix': skipthis=1 prefix = args[i+1] elif args[i] == '-t' or args[i] == '--tag': skipthis=1 if not os.access(filename, os.R_OK): print(prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)) sys.exit(1) logfile=open(filename, 'r') logfile.seek(0, os.SEEK_END) newsize=logfile.tell() if offset != 'EOF': offset = int(offset) if newsize >= offset: logfile.seek(offset) else: print(prefix + ('File truncated from %d to %d' % (offset, newsize))) if (newsize*1.05) < offset: 
logfile.seek(0) # else: we probably just lost a few logs after a fencing op # continue from the new end # TODO: accept a timestamp and discard all messages older than it # Don't block when we reach EOF fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) count = 0 while True: if logfile.tell() >= newsize: break elif limit and count >= limit: break line = logfile.readline() if not line: break print(line.strip()) count += 1 print(prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)) logfile.close() diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index 939f8c8af8..919d3ed9fd 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1521 +1,1518 @@ #!@PYTHON@ """ Regression tests for Pacemaker's scheduler """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - __copyright__ = "Copyright 2004-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import stat import shlex import shutil import argparse import subprocess import platform DESC = """Regression tests for Pacemaker's scheduler""" # Each entry in TESTS is a group of tests, where each test consists of a # test base name, test description, and additional test arguments. # Test groups will be separated by newlines in output. TESTS = [ [ [ "simple1", "Offline" ], [ "simple2", "Start" ], [ "simple3", "Start 2" ], [ "simple4", "Start Failed" ], [ "simple6", "Stop Start" ], [ "simple7", "Shutdown" ], #[ "simple8", "Stonith" ], #[ "simple9", "Lower version" ], #[ "simple10", "Higher version" ], [ "simple11", "Priority (ne)" ], [ "simple12", "Priority (eq)" ], [ "simple8", "Stickiness" ], ], [ [ "group1", "Group" ], [ "group2", "Group + Native" ], [ "group3", "Group + Group" ], [ "group4", "Group + Native (nothing)" ], [ "group5", "Group + Native (move)" ], [ "group6", "Group + Group (move)" ], [ "group7", "Group colocation" ], [ "group13", "Group colocation (cant run)" ], [ "group8", "Group anti-colocation" ], [ "group9", "Group recovery" ], [ "group10", "Group partial recovery" ], [ "group11", "Group target_role" ], [ "group14", "Group stop (graph terminated)" ], [ "group15", "Negative group colocation" ], [ "bug-1573", "Partial stop of a group with two children" ], [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ], [ "bug-lf-2613", "Move group on failure" ], [ "bug-lf-2619", "Move group on clone failure" ], [ "group-fail", "Ensure stop order is preserved for partially active groups" ], [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ], [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ], [ "group-dependents", "Account for the location preferences of things colocated with a group" ], [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], ], [ [ "rsc_dep1", "Must not" ], [ "rsc_dep3", "Must" ], [ "rsc_dep5", "Must not 3" ], [ "rsc_dep7", "Must 3" ], [ "rsc_dep10", "Must (but cant)" ], [ "rsc_dep2", "Must (running)" ], [ "rsc_dep8", "Must (running : alt)" ], [ "rsc_dep4", "Must (running + move)" ], [ "asymmetric", "Asymmetric - require explicit location constraints" ], ], [ [ "orphan-0", "Orphan ignore" ], [ "orphan-1", "Orphan stop" ], [ "orphan-2", "Orphan stop, remove failcount" ], ], [ [ "params-0", "Params: No change" ], [ "params-1", "Params: Changed" ], [ "params-2", "Params: 
Resource definition" ], [ "params-3", "Params: Restart instead of reload if start pending" ], [ "params-4", "Params: Reload" ], [ "params-5", "Params: Restart based on probe digest" ], [ "novell-251689", "Resource definition change + target_role=stopped" ], [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ], [ "params-6", "Params: Detect reload in previously migrated resource" ], [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ], [ "not-reschedule-unneeded-monitor", "Do not reschedule unneeded monitors while resource definitions have changed" ], [ "reload-becomes-restart", "Cancel reload if restart becomes required" ], ], [ [ "target-0", "Target Role : baseline" ], [ "target-1", "Target Role : master" ], [ "target-2", "Target Role : invalid" ], ], [ [ "base-score", "Set a node's default score for all nodes" ], ], [ [ "date-1", "Dates", [ "-t", "2005-020" ] ], [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ], [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ], [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ], [ "probe-0", "Probe (anon clone)" ], [ "probe-1", "Pending Probe" ], [ "probe-2", "Correctly re-probe cloned groups" ], [ "probe-3", "Probe (pending node)" ], [ "probe-4", "Probe (pending node + stopped resource)" ], [ "standby", "Standby" ], [ "comments", "Comments" ], ], [ [ "one-or-more-0", "Everything starts" ], [ "one-or-more-1", "Nothing starts because of A" ], [ "one-or-more-2", "D can start because of C" ], [ "one-or-more-3", "D cannot start because of B and C" ], [ "one-or-more-4", "D cannot start because of target-role" ], [ "one-or-more-5", "Start A and F even though C and D are stopped" ], [ "one-or-more-6", "Leave A running even though B is stopped" ], [ "one-or-more-7", "Leave A running even though C is stopped" ], [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ], [ "clone-require-all-1", "clone B starts node 3 and 4" ], [ "clone-require-all-2", "clone B remains stopped everywhere" ], [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ], [ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ], [ "clone-require-all-5", "clone B starts on node 1 3 and 4" ], [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ], [ "clone-require-all-7", "clone A and B both start at the same time. 
all instances of A start before B" ], [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ], [ "clone-require-all-no-interleave-2", "C starts on nodes 1, 2, and 4 with only one active instance of B" ], [ "clone-require-all-no-interleave-3", "C remains active when instance of B is stopped on one node and started on another" ], [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ], ], [ [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ], [ "location-date-rules-2", "Use location constraints with effective date-based rules" ], [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ], [ "value-source", "Use location constraints with node attribute expressions using value-source" ], [ "rule-dbl-as-auto-number-match", "Floating-point rule values default to number comparison: match" ], [ "rule-dbl-as-auto-number-no-match", "Floating-point rule values default to number comparison: no " "match" ], [ "rule-dbl-as-integer-match", "Floating-point rule values set to integer comparison: match" ], [ "rule-dbl-as-integer-no-match", "Floating-point rule values set to integer comparison: no match" ], [ "rule-dbl-as-number-match", "Floating-point rule values set to number comparison: match" ], [ "rule-dbl-as-number-no-match", "Floating-point rule values set to number comparison: no match" ], [ "rule-dbl-parse-fail-default-str-match", "Floating-point rule values fail to parse, default to string " "comparison: match" ], [ "rule-dbl-parse-fail-default-str-no-match", "Floating-point rule values fail to parse, default to string " "comparison: no match" ], [ "rule-int-as-auto-integer-match", "Integer rule values default to integer comparison: match" ], [ "rule-int-as-auto-integer-no-match", "Integer rule values default to integer comparison: no match" ], [ "rule-int-as-integer-match", "Integer rule values set to integer comparison: match" ], [ "rule-int-as-integer-no-match", "Integer rule values set to integer comparison: no match" ], [ "rule-int-as-number-match", "Integer rule values set to number comparison: match" ], [ "rule-int-as-number-no-match", "Integer rule values set to number comparison: no match" ], [ "rule-int-parse-fail-default-str-match", "Integer rule values fail to parse, default to string " "comparison: match" ], [ "rule-int-parse-fail-default-str-no-match", "Integer rule values fail to parse, default to string " "comparison: no match" ], ], [ [ "order1", "Order start 1" ], [ "order2", "Order start 2" ], [ "order3", "Order stop" ], [ "order4", "Order (multiple)" ], [ "order5", "Order (move)" ], [ "order6", "Order (move w/ restart)" ], [ "order7", "Order (mandatory)" ], [ "order-optional", "Order (score=0)" ], [ "order-required", "Order (score=INFINITY)" ], [ "bug-lf-2171", "Prevent group start when clone is stopped" ], [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ], [ "order-sets", "Ordering for resource sets" ], [ "order-serialize", "Serialize resources without inhibiting migration" ], [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ], [ "clone-order-primitive", "Order clone start after a primitive" ], [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ], [ "order-optional-keyword", "Order (optional keyword)" ], [ "order-mandatory", "Order (mandatory keyword)" ], [ "bug-lf-2493", "Don't imply colocation requirements when applying ordering constraints with clones" ], [ 
"ordered-set-basic-startup", "Constraint set with default order settings" ], [ "ordered-set-natural", "Allow natural set ordering" ], [ "order-wrong-kind", "Order (error)" ], ], [ [ "coloc-loop", "Colocation - loop" ], [ "coloc-many-one", "Colocation - many-to-one" ], [ "coloc-list", "Colocation - many-to-one with list" ], [ "coloc-group", "Colocation - groups" ], [ "coloc-slave-anti", "Anti-colocation with slave shouldn't prevent master colocation" ], [ "coloc-attr", "Colocation based on node attributes" ], [ "coloc-negative-group", "Negative colocation with a group" ], [ "coloc-intra-set", "Intra-set colocation" ], [ "bug-lf-2435", "Colocation sets with a negative score" ], [ "coloc-clone-stays-active", "Ensure clones don't get stopped/demoted because a dependent must stop" ], [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ], [ "colo_master_w_native", "cl#5070 - Verify promotion order is affected when colocating master to native rsc" ], [ "colo_slave_w_native", "cl#5070 - Verify promotion order is affected when colocating slave to native rsc" ], [ "anti-colocation-order", "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ], [ "anti-colocation-master", "Organize order of actions for master resources in anti-colocations" ], [ "anti-colocation-slave", "Organize order of actions for slave resources in anti-colocations" ], [ "enforce-colo1", "Always enforce B with A INFINITY" ], [ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ], [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ], [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ], ], [ [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ], [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ], [ "rsc-sets-clone", "Resource Sets - Clone" ], [ "rsc-sets-master", "Resource Sets - Master" ], [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ], ], [ [ "attrs1", "string: eq (and)" ], [ "attrs2", "string: lt / gt (and)" ], [ "attrs3", "string: ne (or)" ], [ "attrs4", "string: exists" ], [ "attrs5", "string: not_exists" ], [ "attrs6", "is_dc: true" ], [ "attrs7", "is_dc: false" ], [ "attrs8", "score_attribute" ], [ "per-node-attrs", "Per node resource parameters" ], ], [ [ "mon-rsc-1", "Schedule Monitor - start" ], [ "mon-rsc-2", "Schedule Monitor - move" ], [ "mon-rsc-3", "Schedule Monitor - pending start" ], [ "mon-rsc-4", "Schedule Monitor - move/pending start" ], ], [ [ "rec-rsc-0", "Resource Recover - no start" ], [ "rec-rsc-1", "Resource Recover - start" ], [ "rec-rsc-2", "Resource Recover - monitor" ], [ "rec-rsc-3", "Resource Recover - stop - ignore" ], [ "rec-rsc-4", "Resource Recover - stop - block" ], [ "rec-rsc-5", "Resource Recover - stop - fence" ], [ "rec-rsc-6", "Resource Recover - multiple - restart" ], [ "rec-rsc-7", "Resource Recover - multiple - stop" ], [ "rec-rsc-8", "Resource Recover - multiple - block" ], [ "rec-rsc-9", "Resource Recover - group/group" ], [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ], [ "stop-failure-no-quorum", "Stop failure without quorum" ], [ "stop-failure-no-fencing", "Stop failure without fencing available" ], [ "stop-failure-with-fencing", "Stop failure with fencing available" ], [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ], [ "multiple-monitor-one-failed", "Consider resource failed if any of the configured monitor operations 
failed" ], ], [ [ "quorum-1", "No quorum - ignore" ], [ "quorum-2", "No quorum - freeze" ], [ "quorum-3", "No quorum - stop" ], [ "quorum-4", "No quorum - start anyway" ], [ "quorum-5", "No quorum - start anyway (group)" ], [ "quorum-6", "No quorum - start anyway (clone)" ], [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ], [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ], [ "suicide-not-needed-initial-quorum", "no-quorum-policy=suicide: suicide not necessary at initial quorum" ], [ "suicide-not-needed-never-quorate", "no-quorum-policy=suicide: suicide not necessary if never quorate" ], [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ], ], [ [ "rec-node-1", "Node Recover - Startup - no fence" ], [ "rec-node-2", "Node Recover - Startup - fence" ], [ "rec-node-3", "Node Recover - HA down - no fence" ], [ "rec-node-4", "Node Recover - HA down - fence" ], [ "rec-node-5", "Node Recover - CRM down - no fence" ], [ "rec-node-6", "Node Recover - CRM down - fence" ], [ "rec-node-7", "Node Recover - no quorum - ignore" ], [ "rec-node-8", "Node Recover - no quorum - freeze" ], [ "rec-node-9", "Node Recover - no quorum - stop" ], [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ], [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ], [ "rec-node-12", "Node Recover - nothing active - fence" ], [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ], [ "rec-node-15", "Node Recover - unknown lrm section" ], [ "rec-node-14", "Serialize all stonith's" ], ], [ [ "multi1", "Multiple Active (stop/start)" ], ], [ [ "migrate-begin", "Normal migration" ], [ "migrate-success", "Completed migration" ], [ "migrate-partial-1", "Completed migration, missing stop on source" ], [ "migrate-partial-2", "Successful migrate_to only" ], [ "migrate-partial-3", "Successful migrate_to only, target down" ], [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ], [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ], [ "migrate-fail-2", "Failed migrate_from" ], [ "migrate-fail-3", "Failed migrate_from + stop on source" ], [ "migrate-fail-4", "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ], [ "migrate-fail-6", "Failed migrate_to" ], [ "migrate-fail-7", "Failed migrate_to + stop on source" ], [ "migrate-fail-8", "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ], [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ], [ "migrate-stop", "Migration in a stopping stack" ], [ "migrate-start", "Migration in a starting stack" ], [ "migrate-stop_start", "Migration in a restarting stack" ], [ "migrate-stop-complex", "Migration in a complex stopping stack" ], [ "migrate-start-complex", "Migration in a complex starting stack" ], [ "migrate-stop-start-complex", "Migration in a complex moving stack" ], [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ], [ "migrate-1", "Migrate (migrate)" ], [ "migrate-2", "Migrate (stable)" ], [ "migrate-3", "Migrate (failed migrate_to)" ], [ "migrate-4", "Migrate (failed migrate_from)" ], [ "novell-252693", "Migration in a stopping stack" ], [ "novell-252693-2", "Migration in a starting stack" ], [ "novell-252693-3", "Non-Migration in a starting 
and stopping stack" ], [ "bug-1820", "Migration in a group" ], [ "bug-1820-1", "Non-migration in a group" ], [ "migrate-5", "Primitive migration with a clone" ], [ "migrate-fencing", "Migration after Fencing" ], [ "migrate-both-vms", "Migrate two VMs that have no colocation" ], [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ], [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ], [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ], [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ], [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ], [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ], [ "6-migrate-group", "Advanced migrate logic, migrate a group" ], [ "7-migrate-group-one-unmigratable", "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ], [ "8-am-then-bm-a-migrating-b-stopping", "Advanced migrate logic, A then B, A migrating, B stopping" ], [ "9-am-then-bm-b-migrating-a-stopping", "Advanced migrate logic, A then B, B migrate, A stopping" ], [ "10-a-then-bm-b-move-a-clone", "Advanced migrate logic, A clone then B, migrate B while stopping A" ], [ "11-a-then-bm-b-move-a-clone-starting", "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ], [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ], [ "a-demote-then-b-migrate", "A demote then B stop. migrate B" ], # @TODO: If pacemaker implements versioned attributes, uncomment this test #[ "migrate-versioned", "Disable migration for versioned resources" ], [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ], ], [ [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ], [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ], [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ], [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ], [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ], [ "inc0", "Incarnation start" ], [ "inc1", "Incarnation start order" ], [ "inc2", "Incarnation silent restart, stop, move" ], [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ], [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ], [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ], [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ], [ "inc7", "Clone colocation" ], [ "inc8", "Clone anti-colocation" ], [ "inc9", "Non-unique clone" ], [ "inc10", "Non-unique clone (stop)" ], [ "inc11", "Primitive colocation with clones" ], [ "inc12", "Clone shutdown" ], [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], [ "bug-lf-2160", "Don't shuffle clones due to colocation" ], [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ], [ "bug-lf-2153", "Clone ordering constraints" ], [ "bug-lf-2361", "Ensure clones observe mandatory ordering 
constraints if the LHS is unrunnable" ], [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ], [ "clone-colocate-instance-1", "Colocation with a specific clone instance (negative example)" ], [ "clone-colocate-instance-2", "Colocation with a specific clone instance" ], [ "clone-order-instance", "Ordering with specific clone instances" ], [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ], [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ], [ "bug-lf-2544", "Balanced clone placement" ], [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ], [ "bug-lf-2574", "Avoid clone shuffle" ], [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ], [ "bug-cl-5168", "Don't shuffle clones" ], [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ], [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ], [ "clone-interleave-1", "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-3", "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ], [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ], [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ], [ "clone-requires-quorum", "Clone with requires=quorum with presumed-inactive instance on failed node" ], ], [ [ "cloned_start_one", "order first clone then clone... first clone_min=2" ], [ "cloned_start_two", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ], [ "clone_min_interleave_start_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_start_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_two", "order first clone then primitive... 
first clone_min=2" ], ], [ [ "unfence-startup", "Clean unfencing" ], [ "unfence-definition", "Unfencing when the agent changes" ], [ "unfence-parameters", "Unfencing when the agent parameters changes" ], [ "unfence-device", "Unfencing when a cluster has only fence devices" ], ], [ [ "master-0", "Stopped -> Slave" ], [ "master-1", "Stopped -> Promote" ], [ "master-2", "Stopped -> Promote : notify" ], [ "master-3", "Stopped -> Promote : master location" ], [ "master-4", "Started -> Promote : master location" ], [ "master-5", "Promoted -> Promoted" ], [ "master-6", "Promoted -> Promoted (2)" ], [ "master-7", "Promoted -> Fenced" ], [ "master-8", "Promoted -> Fenced -> Moved" ], [ "master-9", "Stopped + Promotable + No quorum" ], [ "master-10", "Stopped -> Promotable : notify with monitor" ], [ "master-11", "Stopped -> Promote : colocation" ], [ "novell-239082", "Demote/Promote ordering" ], [ "novell-239087", "Stable master placement" ], [ "master-12", "Promotion based solely on rsc_location constraints" ], [ "master-13", "Include preferences of colocated resources when placing master" ], [ "master-demote", "Ordering when actions depends on demoting a slave resource" ], [ "master-ordering", "Prevent resources from starting that need a master" ], [ "bug-1765", "Master-Master Colocation (do not stop the slaves)" ], [ "master-group", "Promotion of cloned groups" ], [ "bug-lf-1852", "Don't shuffle master/slave instances unnecessarily" ], [ "master-failed-demote", "Don't retry failed demote actions" ], [ "master-failed-demote-2", "Don't retry failed demote actions (notify=false)" ], [ "master-depend", "Ensure resources that depend on the master don't get allocated until the master does" ], [ "master-reattach", "Re-attach to a running master" ], [ "master-allow-start", "Don't include master score if it would prevent allocation" ], [ "master-colocation", "Allow master instances placemaker to be influenced by colocation constraints" ], [ "master-pseudo", "Make sure promote/demote pseudo actions are created correctly" ], [ "master-role", "Prevent target-role from promoting more than master-max instances" ], [ "bug-lf-2358", "Master-Master anti-colocation" ], [ "master-promotion-constraint", "Mandatory master colocation constraints" ], [ "unmanaged-master", "Ensure role is preserved for unmanaged resources" ], [ "master-unmanaged-monitor", "Start the correct monitor operation for unmanaged masters" ], [ "master-demote-2", "Demote does not clear past failure" ], [ "master-move", "Move master based on failure of colocated group" ], [ "master-probed-score", "Observe the promotion score of probed resources" ], [ "colocation_constraint_stops_master", "cl#5054 - Ensure master is demoted when stopped by colocation constraint" ], [ "colocation_constraint_stops_slave", "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" ], [ "order_constraint_stops_master", "cl#5054 - Ensure master is demoted when stopped by order constraint" ], [ "order_constraint_stops_slave", "cl#5054 - Ensure slave is not demoted when stopped by order constraint" ], [ "master_monitor_restart", "cl#5072 - Ensure master monitor operation will start after promotion" ], [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ], [ "bug-5143-ms-shuffle", "Prevent master shuffling due to promotion score" ], [ "master-demote-block", "Block promotion if demote fails with on-fail=block" ], [ "master-dependent-ban", "Don't stop instances from being active because a dependent is banned from that host" ], [ 
"master-stop", "Stop instances due to location constraint with role=Started" ], [ "master-partially-demoted-group", "Allow partially demoted group to finish demoting" ], [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ], [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ], [ "master-asymmetrical-order", "Fix the behaviors of multi-state resources with asymmetrical ordering" ], [ "master-notify", "Master promotion with notifies" ], [ "master-score-startup", "Use permanent master scores without LRM history" ], [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ], [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ], [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ], ], [ [ "history-1", "Correctly parse stateful-1 resource state" ], ], [ [ "managed-0", "Managed (reference)" ], [ "managed-1", "Not managed - down" ], [ "managed-2", "Not managed - up" ], [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ], [ "bug-5028-detach", "Ensure detach still works" ], [ "bug-5028-bottom", "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ], [ "unmanaged-stop-1", "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ], [ "unmanaged-stop-2", "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ], [ "unmanaged-stop-3", "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged" ], [ "unmanaged-stop-4", "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ], [ "unmanaged-block-restart", "Block restart of resources if any dependent resource in a group is unmanaged" ], ], [ [ "interleave-0", "Interleave (reference)" ], [ "interleave-1", "coloc - not interleaved" ], [ "interleave-2", "coloc - interleaved" ], [ "interleave-3", "coloc - interleaved (2)" ], [ "interleave-pseudo-stop", "Interleaved clone during stonith" ], [ "interleave-stop", "Interleaved clone during stop" ], [ "interleave-restart", "Interleaved clone during dependency restart" ], ], [ [ "notify-0", "Notify reference" ], [ "notify-1", "Notify simple" ], [ "notify-2", "Notify simple, confirm" ], [ "notify-3", "Notify move, confirm" ], [ "novell-239079", "Notification priority" ], #[ "notify-2", "Notify - 764" ], [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ], [ "route-remote-notify", "Route remote notify actions through correct cluster node" ], [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ], ], [ [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ], [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ], [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ], [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 
_after_ a stop" ], [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ], [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ], [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ], [ "829", "OSDL #829" ], [ "994", "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ], [ "994-2", "OSDL #994 - with a dependent resource" ], [ "1360", "OSDL #1360 - Clone stickiness" ], [ "1484", "OSDL #1484 - on_fail=stop" ], [ "1494", "OSDL #1494 - Clone stability" ], [ "unrunnable-1", "Unrunnable" ], [ "unrunnable-2", "Unrunnable 2" ], [ "stonith-0", "Stonith loop - 1" ], [ "stonith-1", "Stonith loop - 2" ], [ "stonith-2", "Stonith loop - 3" ], [ "stonith-3", "Stonith startup" ], [ "stonith-4", "Stonith node state" ], [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ], [ "bug-1572-1", "Recovery of groups depending on master/slave" ], [ "bug-1572-2", "Recovery of groups depending on master/slave when the master is never re-promoted" ], [ "bug-1685", "Depends-on-master ordering" ], [ "bug-1822", "Don't promote partially active groups" ], [ "bug-pm-11", "New resource added to a m/s group" ], [ "bug-pm-12", "Recover only the failed portion of a cloned group" ], [ "bug-n-387749", "Don't shuffle clone instances" ], [ "bug-n-385265", "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ], [ "bug-n-385265-2", "Ensure groups are migrated instead of remaining partially active on the current node" ], [ "bug-lf-1920", "Correctly handle probes that find active resources" ], [ "bnc-515172", "Location constraint with multiple expressions" ], [ "colocate-primitive-with-clone", "Optional colocation with a clone" ], [ "use-after-free-merge", "Use-after-free in native_merge_weights" ], [ "bug-lf-2551", "STONITH ordering for stop" ], [ "bug-lf-2606", "Stonith implies demote" ], [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ], [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" ], [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ], [ "bug-5014-A-stop-B-started", "Verify when A stops B does not stop if it has already started using symmetric=false" ], [ "bug-5014-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ], [ "bug-5014-CthenAthenB-C-stopped", "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ], [ "bug-5014-CLONE-A-start-B-start", "Verify when A starts B starts using clone resources with symmetric=false" ], [ "bug-5014-CLONE-A-stop-B-started", "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ], [ "bug-5014-GROUP-A-start-B-start", "Verify when A starts B starts when using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-started", "Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ], [ "bug-5014-ordered-set-symmetrical-false", "Verify ordered sets work with symmetrical=false" ], [ "bug-5014-ordered-set-symmetrical-true", "Verify ordered sets work with symmetrical=true" ], [ "bug-5007-masterslave_colocation", "Verify use of 
colocation scores other than INFINITY and -INFINITY work on multi-state resources" ], [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ], [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ], [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ], [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ], [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ], [ "failcount", "Ensure failcounts are correctly expired" ], [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ], [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ], [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ], [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ], [ "monitor-onfail-stop", "bug-5058 - Monitor failure with on-fail set to stop" ], [ "bug-5059", "No need to restart p_stateful1:*" ], [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ], [ "bug-5069-op-disabled", "Test on-fail=ignore with failure when monitor is disabled" ], [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ], [ "expire-non-blocked-failure", "Ignore failure-timeout only if the failed operation has on-fail=block" ], [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ], [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ], [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ], [ "order-expired-failure", "Order failcount cleanup after remote fencing" ], [ "ignore_stonith_rsc_order1", "cl#5056- Ignore order constraint between stonith and non-stonith rsc" ], [ "ignore_stonith_rsc_order2", "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ], [ "ignore_stonith_rsc_order3", "cl#5056- Ignore order constraint, stonith clone and mixed group" ], [ "ignore_stonith_rsc_order4", "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ], [ "honor_stonith_rsc_order1", "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ], [ "honor_stonith_rsc_order2", "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ], [ "honor_stonith_rsc_order3", "cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ], [ "honor_stonith_rsc_order4", "cl#5056- Honor order constraint, between two native stonith rscs" ], [ "multiply-active-stonith", "Multiply active stonith" ], [ "probe-timeout", "cl#5099 - Default probe timeout" ], [ "order-first-probes", "cl#5301 - respect order constraints when relevant resources are being probed" ], [ "concurrent-fencing", "Allow performing fencing operations in parallel" ], [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ], ], [ [ "systemhealth1", "System Health () #1" ], [ "systemhealth2", "System Health () #2" ], [ "systemhealth3", "System Health () #3" ], [ "systemhealthn1", "System Health (None) #1" ], [ "systemhealthn2", "System Health (None) #2" ], [ "systemhealthn3", "System Health (None) #3" ], [ "systemhealthm1", "System Health (Migrate On Red) #1" ], [ "systemhealthm2", "System Health (Migrate On Red) #2" ], [ "systemhealthm3", "System 
Health (Migrate On Red) #3" ], [ "systemhealtho1", "System Health (Only Green) #1" ], [ "systemhealtho2", "System Health (Only Green) #2" ], [ "systemhealtho3", "System Health (Only Green) #3" ], [ "systemhealthp1", "System Health (Progessive) #1" ], [ "systemhealthp2", "System Health (Progessive) #2" ], [ "systemhealthp3", "System Health (Progessive) #3" ], ], [ [ "utilization", "Placement Strategy - utilization" ], [ "minimal", "Placement Strategy - minimal" ], [ "balanced", "Placement Strategy - balanced" ], ], [ [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ], [ "placement-priority", "Optimized Placement Strategy - priority" ], [ "placement-location", "Optimized Placement Strategy - location" ], [ "placement-capacity", "Optimized Placement Strategy - capacity" ], ], [ [ "utilization-order1", "Utilization Order - Simple" ], [ "utilization-order2", "Utilization Order - Complex" ], [ "utilization-order3", "Utilization Order - Migrate" ], [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ], [ "utilization-shuffle", "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ], [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ], [ "load-stopped-loop-2", "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ], ], [ [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ], [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ], [ "colocated-utilization-group", "Colocated Utilization - Group" ], [ "colocated-utilization-clone", "Colocated Utilization - Clone" ], [ "utilization-check-allowed-nodes", "Only check the capacities of the nodes that can run the resource" ], ], [ [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ], [ "node-maintenance-1", "cl#5128 - Node maintenance" ], [ "node-maintenance-2", "cl#5128 - Node maintenance (coming out of maintenance mode)" ], [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ], [ "rsc-maintenance", "Per-resource maintenance" ], ], [ [ "not-installed-agent", "The resource agent is missing" ], [ "not-installed-tools", "Something the resource agent needs is missing" ], ], [ [ "stopped-monitor-00", "Stopped Monitor - initial start" ], [ "stopped-monitor-01", "Stopped Monitor - failed started" ], [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ], [ "stopped-monitor-03", "Stopped Monitor - stop started" ], [ "stopped-monitor-04", "Stopped Monitor - failed stop" ], [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ], [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ], [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ], [ "stopped-monitor-08", "Stopped Monitor - migrate" ], [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ], [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ], [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ], [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ], [ "stopped-monitor-20", "Stopped Monitor - initial stop" ], [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ], [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ], [ "stopped-monitor-23", "Stopped Monitor - start stopped" ], [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ], [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped 
multi-up" ], [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ], [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ], [ "stopped-monitor-30", "Stopped Monitor - new node started" ], [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ], ], [ # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on right nodes [ "intervals", "Recurring monitor interval handling" ], ], [ [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ], [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ], [ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ], [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ], [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ], [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ], [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ], [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ], [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ], [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ], [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ], [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ], [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ], [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ], [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ], [ "ticket-primitive-16", "Ticket - Primitive (loss-policy=demote, standby, granted)" ], [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ], [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ], [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ], [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ], [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ], [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ], [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ], [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ], [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ], [ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ], [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ], [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ], [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ], [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ], [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ], [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ], [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ], [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ], [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ], [ 
"ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ], [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ], [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ], [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ], [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ], [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ], [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ], [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ], [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ], [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ], [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ], [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ], [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ], [ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ], [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ], [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ], [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ], [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ], [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ], [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ], [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ], [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ], [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ], [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ], [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ], [ "ticket-clone-15", "Ticket - Clone (loss-policy=stop, standby, revoked)" ], [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ], [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ], [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ], [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ], [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ], [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ], [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ], [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ], [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-master-1", "Ticket - Master (loss-policy=stop, initial)" ], [ "ticket-master-2", "Ticket - Master (loss-policy=stop, granted)" ], [ "ticket-master-3", "Ticket - Master (loss-policy-stop, revoked)" ], [ "ticket-master-4", "Ticket - Master (loss-policy=demote, initial)" ], [ "ticket-master-5", "Ticket - Master (loss-policy=demote, granted)" ], [ "ticket-master-6", "Ticket - Master (loss-policy=demote, revoked)" ], [ "ticket-master-7", "Ticket - Master (loss-policy=fence, initial)" ], [ "ticket-master-8", "Ticket - Master (loss-policy=fence, granted)" ], [ "ticket-master-9", "Ticket - Master (loss-policy=fence, revoked)" ], [ "ticket-master-10", "Ticket - Master (loss-policy=freeze, initial)" ], [ "ticket-master-11", "Ticket - Master (loss-policy=freeze, granted)" ], [ "ticket-master-12", 
"Ticket - Master (loss-policy=freeze, revoked)" ], [ "ticket-master-13", "Ticket - Master (loss-policy=stop, standby, granted)" ], [ "ticket-master-14", "Ticket - Master (loss-policy=stop, granted, standby)" ], [ "ticket-master-15", "Ticket - Master (loss-policy=stop, standby, revoked)" ], [ "ticket-master-16", "Ticket - Master (loss-policy=demote, standby, granted)" ], [ "ticket-master-17", "Ticket - Master (loss-policy=demote, granted, standby)" ], [ "ticket-master-18", "Ticket - Master (loss-policy=demote, standby, revoked)" ], [ "ticket-master-19", "Ticket - Master (loss-policy=fence, standby, granted)" ], [ "ticket-master-20", "Ticket - Master (loss-policy=fence, granted, standby)" ], [ "ticket-master-21", "Ticket - Master (loss-policy=fence, standby, revoked)" ], [ "ticket-master-22", "Ticket - Master (loss-policy=freeze, standby, granted)" ], [ "ticket-master-23", "Ticket - Master (loss-policy=freeze, granted, standby)" ], [ "ticket-master-24", "Ticket - Master (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ], [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ], [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ], [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ], [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ], [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ], [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ], [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ], [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ], [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ], [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ], [ "site-specific-params", "Site-specific instance attributes based on rules" ], ], [ [ "template-1", "Template - 1" ], [ "template-2", "Template - 2" ], [ "template-3", "Template - 3 (merge operations)" ], [ "template-coloc-1", "Template - Colocation 1" ], [ "template-coloc-2", "Template - Colocation 2" ], [ "template-coloc-3", "Template - Colocation 3" ], [ "template-order-1", "Template - Order 1" ], [ "template-order-2", "Template - Order 2" ], [ "template-order-3", "Template - Order 3" ], [ "template-ticket", "Template - Ticket" ], [ "template-rsc-sets-1", "Template - Resource Sets 1" ], [ "template-rsc-sets-2", "Template - Resource Sets 2" ], [ "template-rsc-sets-3", "Template - Resource Sets 3" ], [ "template-rsc-sets-4", "Template - Resource Sets 4" ], [ "template-clone-primitive", "Cloned primitive from template" ], [ "template-clone-group", "Cloned group from template" ], [ "location-sets-templates", "Resource sets and templates - Location" ], [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ], [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ], [ "tags-location", "Tags - Location" ], [ "tags-ticket", "Tags - Ticket" ], ], [ [ "container-1", "Container - initial" ], [ "container-2", "Container - monitor failed" ], [ "container-3", "Container - stop failed" ], [ "container-4", "Container - 
reached migration-threshold" ], [ "container-group-1", "Container in group - initial" ], [ "container-group-2", "Container in group - monitor failed" ], [ "container-group-3", "Container in group - stop failed" ], [ "container-group-4", "Container in group - reached migration-threshold" ], [ "container-is-remote-node", "Place resource within container when container is remote-node" ], [ "bug-rh-1097457", "Kill user defined container/contents ordering" ], [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ], [ "bundle-order-startup", "Bundle startup ordering" ], [ "bundle-order-partial-start", "Bundle startup ordering when some dependencies are already running" ], [ "bundle-order-partial-start-2", "Bundle startup ordering when some dependencies and the container are already running" ], [ "bundle-order-stop", "Bundle stop ordering" ], [ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ], [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ], [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ], [ "bundle-order-startup-clone-2", "Bundle startup with clones" ], [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ], [ "bundle-nested-colocation", "Colocation of nested connection resources" ], [ "bundle-order-fencing", "Order pseudo bundle fencing after parent node fencing if both are happening" ], [ "bundle-probe-order-1", "order 1" ], [ "bundle-probe-order-2", "order 2" ], [ "bundle-probe-order-3", "order 3" ], [ "bundle-probe-remotes", "Ensure remotes get probed too" ], [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ], [ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ], ], [ [ "whitebox-fail1", "Fail whitebox container rsc" ], [ "whitebox-fail2", "Fail cluster connection to guest node" ], [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ], [ "whitebox-start", "Start whitebox container with resources assigned to it" ], [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ], [ "whitebox-move", "Move whitebox container with resources assigned to it" ], [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ], [ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ], [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ], [ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ], [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ], [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ], [ "whitebox-migrate1", "Migrate both container and connection resource" ], [ "whitebox-imply-stop-on-fence", "imply stop action on container node rsc when host node is fenced" ], [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ], [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ], [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ], [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ], ], [ [ "remote-startup-probes", "Baremetal remote-node startup probes" ], [ "remote-startup", "Startup a newly discovered remote-nodes with no status" ], [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ], [ "remote-fence-unclean2", "Fence 
baremetal remote-node after cluster node fails and connection can not be recovered" ], [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ], [ "remote-move", "Move remote-node connection resource" ], [ "remote-disable", "Disable a baremetal remote-node" ], [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ], [ "remote-orphaned", "Properly shutdown orphaned connection resource" ], [ "remote-orphaned2", "Verify we can handle orphaned remote connections with active resources on the remote" ], [ "remote-recover", "Recover connection resource after cluster-node fails" ], [ "remote-stale-node-entry", "Make sure we properly handle leftover remote-node entries in the node section" ], [ "remote-partial-migrate", "Make sure partial migrations are handled before ops on the remote node" ], [ "remote-partial-migrate2", "Make sure partial migration target is preferred for remote connection" ], [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ], [ "remote-start-fail", "Make sure a start failure does not result in fencing if no active resources are on remote" ], [ "remote-unclean2", "Make sure monitor failure always results in fencing, even if no rsc are active on remote" ], [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ], [ "remote-recovery", "Recover remote connections before attempting demotion" ], [ "remote-recover-connection", "Optimistically recover only the connection" ], [ "remote-recover-all", "Fencing when the connection has no home" ], [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ], [ "remote-recover-unknown", "Fencing when the connection has no home and the remote has no operation history" ], [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ], [ "remote-connection-unrecoverable", "Remote connection host must be fenced, with connection unrecoverable" ], ], [ [ "resource-discovery", "Exercises resource-discovery location constraint option" ], [ "rsc-discovery-per-node", "Disable resource discovery per node" ], [ "shutdown-lock", "Ensure shutdown lock works properly" ], [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], ], [ [ "op-defaults", "Test op_defaults conditional expressions" ], [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], [ "op-defaults-3", "Test op_defaults precedence" ], [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], ], [ [ "stop-all-resources", "Test stop-all-resources=true "], ], [ [ "ocf_degraded-remap-ocf_ok", "Test DEGRADED remapped to OK" ], [ "ocf_degraded_master-remap-ocf_ok", "Test DEGRADED_MASTER remapped to OK"], ], # @TODO: If pacemaker implements versioned attributes, uncomment these tests #[ # [ "versioned-resources", "Start resources with #ra-version rules" ], # [ "restart-versioned", "Restart resources on #ra-version change" ], # [ "reload-versioned", "Reload resources on #ra-version change" ], #], #[ # [ "versioned-operations-1", "Use #ra-version to configure operations of native resources" ], # [ "versioned-operations-2", "Use #ra-version to configure operations of stonith resources" ], # [ "versioned-operations-3", "Use #ra-version to configure operations of master/slave resources" ], # [ "versioned-operations-4", "Use #ra-version to configure operations of groups of the resources" ], #], ] TESTS_64BIT = [ [ [ 
"year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ], ], ] # Constants substituted in the build process class BuildVars(object): SBINDIR = "@sbindir@" BUILDDIR = "@abs_top_builddir@" CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@" # These values must be kept in sync with include/crm/crm.h class CrmExit(object): OK = 0 ERROR = 1 NOT_INSTALLED = 5 NOINPUT = 66 def is_executable(path): """ Check whether a file at a given path is executable. """ try: return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR except OSError: return False def diff(file1, file2, **kwargs): """ Call diff on two files """ return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space", "--ignore-blank-lines", file1, file2 ], **kwargs) def sort_file(filename): """ Sort a file alphabetically """ with io.open(filename, "rt") as f: lines = sorted(f) with io.open(filename, "wt") as f: f.writelines(lines) def remove_files(filenames): """ Remove a list of files """ for filename in filenames: try: os.remove(filename) except OSError: pass def normalize(filename): """ Remove text from a file that isn't important for comparison """ if not hasattr(normalize, "patterns"): normalize.patterns = [ re.compile(r'crm_feature_set="[^"]*"'), re.compile(r'batch-limit="[0-9]*"') ] if os.path.isfile(filename): with io.open(filename, "rt") as f: lines = f.readlines() with io.open(filename, "wt") as f: for line in lines: for pattern in normalize.patterns: line = pattern.sub("", line) f.write(line) def cat(filename, dest=sys.stdout): """ Copy a file to a destination file descriptor """ with io.open(filename, "rt") as f: shutil.copyfileobj(f, dest) class CtsScheduler(object): """ Regression tests for Pacemaker's scheduler """ def _parse_args(self, argv): """ Parse command-line arguments """ parser = argparse.ArgumentParser(description=DESC) parser.add_argument('-V', '--verbose', action='count', help='Display any differences from expected output') parser.add_argument('--run', metavar='TEST', help=('Run only single specified test (any further ' 'arguments will be passed to crm_simulate)')) parser.add_argument('--update', action='store_true', help='Update expected results with actual results') parser.add_argument('-b', '--binary', metavar='PATH', help='Specify path to crm_simulate') parser.add_argument('-i', '--io-dir', metavar='PATH', help='Specify path to regression test data directory') parser.add_argument('-o', '--out-dir', metavar='PATH', help='Specify where intermediate and output files should go') parser.add_argument('-v', '--valgrind', action='store_true', help='Run all commands under valgrind') parser.add_argument('--valgrind-dhat', action='store_true', help='Run all commands under valgrind with heap analyzer') parser.add_argument('--valgrind-skip-output', action='store_true', help='If running under valgrind, do not display output') parser.add_argument('--testcmd-options', metavar='OPTIONS', default='', help='Additional options for command under test') # argparse can't handle "everything after --run TEST", so grab that self.single_test_args = [] narg = 0 for arg in argv: narg = narg + 1 if arg == '--run': (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:]) break self.args = parser.parse_args(argv[1:]) def _error(self, s): print(" * ERROR: %s" % s) def _failed(self, s): print(" * FAILED: %s" % s) def _get_valgrind_cmd(self): """ Return command arguments needed (or not) to run valgrind """ if self.args.valgrind: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "-q", "--gen-suppressions=all", 
"--time-stamp=yes", "--trace-children=no", "--show-reachable=no", "--leak-check=full", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home) ] if self.args.valgrind_dhat: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "--tool=exp-dhat", "--time-stamp=yes", "--trace-children=no", "--show-top-n=100", "--num-callers=4" ] return [] def _get_simulator_cmd(self): """ Locate the simulation binary """ if self.args.binary is None: self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate" if not is_executable(self.args.binary): self.args.binary = BuildVars.SBINDIR + "/crm_simulate" if not is_executable(self.args.binary): # @TODO it would be more pythonic to raise an exception self._error("Test binary " + self.args.binary + " not found") sys.exit(CrmExit.NOT_INSTALLED) return [ self.args.binary ] + shlex.split(self.args.testcmd_options) def set_schema_env(self): """ Ensure schema directory environment variable is set, if possible """ try: return os.environ['PCMK_schema_directory'] except KeyError: for d in [ os.path.join(BuildVars.BUILDDIR, "xml"), BuildVars.CRM_SCHEMA_DIRECTORY ]: if os.path.isdir(d): os.environ['PCMK_schema_directory'] = d return d return None def __init__(self, argv=sys.argv): self._parse_args(argv) # Where this executable lives self.test_home = os.path.dirname(os.path.realpath(argv[0])) # Where test data resides if self.args.io_dir is None: self.args.io_dir = os.path.join(self.test_home, "scheduler") # Where to store generated files if self.args.out_dir is None: self.args.out_dir = self.args.io_dir self.failed_filename = os.path.join(self.test_home, ".regression.failed.diff") else: self.failed_filename = os.path.join(self.args.out_dir, ".regression.failed.diff") os.environ['CIB_shadow_dir'] = self.args.out_dir self.failed_file = None # Single test mode (if requested) try: # User can give test base name or file name of a test input self.args.run = os.path.splitext(os.path.basename(self.args.run))[0] except (AttributeError, TypeError): pass # --run was not specified self.set_schema_env() # Arguments needed (or not) to run commands self.valgrind_args = self._get_valgrind_cmd() self.simulate_args = self._get_simulator_cmd() # Test counters self.num_failed = 0 self.num_tests = 0 def _compare_files(self, filename1, filename2): """ Add any file differences to failed results """ with io.open("/dev/null", "wt") as dev_null: if diff(filename1, filename2, stdout=dev_null) != 0: diff(filename1, filename2, stdout=self.failed_file, stderr=dev_null) self.failed_file.write("\n"); return True return False def run_one(self, test_name, test_desc, test_args=[]): """ Run one scheduler test """ print(" Test %-25s %s" % ((test_name + ":"), test_desc)) did_fail = False self.num_tests = self.num_tests + 1 # Test inputs input_filename = "%s/%s.xml" % (self.args.io_dir, test_name) expected_filename = "%s/%s.exp" % (self.args.io_dir, test_name) dot_expected_filename = "%s/%s.dot" % (self.args.io_dir, test_name) scores_filename = "%s/%s.scores" % (self.args.io_dir, test_name) summary_filename = "%s/%s.summary" % (self.args.io_dir, test_name) stderr_expected_filename = "%s/%s.stderr" % (self.args.io_dir, test_name) # (Intermediate) test outputs output_filename = "%s/%s.out" % (self.args.out_dir, test_name) dot_output_filename = "%s/%s.pe.dot" % (self.args.out_dir, test_name) score_output_filename = "%s/%s.scores.pe" % (self.args.out_dir, test_name) summary_output_filename = "%s/%s.summary.pe" % (self.args.out_dir, test_name) stderr_output_filename = 
"%s/%s.stderr.pe" % (self.args.out_dir, test_name) valgrind_output_filename = "%s/%s.valgrind" % (self.args.out_dir, test_name) # Common arguments for running test test_cmd = [] if self.valgrind_args: test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ] test_cmd = test_cmd + self.simulate_args # @TODO It would be more pythonic to raise exceptions for errors, # then perhaps it would be nice to make a single-test class # Ensure necessary test inputs exist if not os.path.isfile(input_filename): self._error("No input") self.num_failed = self.num_failed + 1 return CrmExit.NOINPUT if not self.args.update and not os.path.isfile(expected_filename): self._error("no stored output") return CrmExit.NOINPUT # Run simulation to generate summary output if self.args.run: # Single test mode test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args print(" ".join(test_cmd_full)) else: # @TODO Why isn't test_args added here? test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] with io.open(summary_output_filename, "wt") as f: subprocess.call(test_cmd_full, stdout=f, stderr=subprocess.STDOUT, env=os.environ) if self.args.run: cat(summary_output_filename) # Re-run simulation to generate dot, graph, and scores test_cmd_full = test_cmd + [ '-x', input_filename, '-D', dot_output_filename, '-G', output_filename, '-sSQ' ] + test_args with io.open(stderr_output_filename, "wt") as f_stderr, \ io.open(score_output_filename, "wt") as f_score: rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ) # Check for test command failure if rc != CrmExit.OK: self._failed("Test returned: %d" % rc) did_fail = True print(" ".join(test_cmd_full)) # Check for valgrind errors if self.valgrind_args and not self.args.valgrind_skip_output: if os.stat(valgrind_output_filename).st_size > 0: self._failed("Valgrind reported errors") did_fail = True cat(valgrind_output_filename) remove_files([ valgrind_output_filename ]) # Check for core dump if os.path.isfile("core"): self._failed("Core-file detected: core." 
+ test_name) did_fail = True os.rename("core", "%s/core.%s" % (self.test_home, test_name)) # Check any stderr output if os.path.isfile(stderr_expected_filename): if self._compare_files(stderr_expected_filename, stderr_output_filename): self._failed("stderr changed") did_fail = True elif os.stat(stderr_output_filename).st_size > 0: self._failed("Output was written to stderr") did_fail = True cat(stderr_output_filename) remove_files([ stderr_output_filename ]) # Check whether output graph exists, and normalize it if (not os.path.isfile(output_filename) or os.stat(output_filename).st_size == 0): self._error("No graph produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ output_filename ]) return CrmExit.ERROR normalize(output_filename) # Check whether dot output exists, and sort it if (not os.path.isfile(dot_output_filename) or os.stat(dot_output_filename).st_size == 0): self._error("No dot-file summary produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ dot_output_filename, output_filename ]) return CrmExit.ERROR with io.open(dot_output_filename, "rt") as f: first_line = f.readline() # "digraph" line with opening brace lines = f.readlines() last_line = lines[-1] # closing brace del lines[-1] lines = sorted(set(lines)) # unique sort with io.open(dot_output_filename, "wt") as f: f.write(first_line) f.writelines(lines) f.write(last_line) # Check whether score output exists, and sort it if (not os.path.isfile(score_output_filename) or os.stat(score_output_filename).st_size == 0): self._error("No allocation scores produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ score_output_filename, output_filename ]) return CrmExit.ERROR else: sort_file(score_output_filename) if self.args.update: shutil.copyfile(output_filename, expected_filename) shutil.copyfile(dot_output_filename, dot_expected_filename) shutil.copyfile(score_output_filename, scores_filename) shutil.copyfile(summary_output_filename, summary_filename) print(" Updated expected outputs") if self._compare_files(summary_filename, summary_output_filename): self._failed("summary changed") did_fail = True if self._compare_files(dot_expected_filename, dot_output_filename): self._failed("dot-file summary changed") did_fail = True else: remove_files([ dot_output_filename ]) if self._compare_files(expected_filename, output_filename): self._failed("xml-file changed") did_fail = True if self._compare_files(scores_filename, score_output_filename): self._failed("scores-file changed") did_fail = True remove_files([ output_filename, score_output_filename, summary_output_filename]) if did_fail: self.num_failed = self.num_failed + 1 return CrmExit.ERROR return CrmExit.OK def run_all(self): """ Run all defined tests """ if platform.architecture()[0] == "64bit": TESTS.extend(TESTS_64BIT) for group in TESTS: for test in group: try: args = test[2] except IndexError: args = [] self.run_one(test[0], test[1], args) print() def _print_summary(self): """ Print a summary of parameters for this test run """ print("Test home is:\t" + self.test_home) print("Test binary is:\t" + self.args.binary) if 'PCMK_schema_directory' in os.environ: print("Schema home is:\t" + os.environ['PCMK_schema_directory']) if self.valgrind_args != []: print("Activating memory testing with valgrind") print() def _test_results(self): if self.num_failed == 0: return CrmExit.OK if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0: if self.args.verbose: self._error("Results of %d 
failed tests (out of %d):" % (self.num_failed, self.num_tests)) cat(self.failed_filename) else: self._error("Results of %d failed tests (out of %d) are in %s" % (self.num_failed, self.num_tests, self.failed_filename)) self._error("Use -V to display them after running the tests") else: self._error("%d (of %d) tests failed (no diff results)" % (self.num_failed, self.num_tests)) if os.path.isfile(self.failed_filename): os.remove(self.failed_filename) return CrmExit.ERROR def run(self): """ Run test(s) as specified """ self._print_summary() # Zero out the error log self.failed_file = io.open(self.failed_filename, "wt") if self.args.run is None: print("Performing the following tests from " + self.args.io_dir) print() self.run_all() print() self.failed_file.close() rc = self._test_results() else: rc = self.run_one(self.args.run, "Single shot", self.single_test_args) self.failed_file.close() cat(self.failed_filename) return rc if __name__ == "__main__": sys.exit(CtsScheduler().run()) # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/cts/environment.py b/cts/environment.py index 6a97b12899..cf26b5f65a 100644 --- a/cts/environment.py +++ b/cts/environment.py @@ -1,655 +1,652 @@ """ Test environment classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2014-2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys, time, os, socket, random from cts.remote import * from cts.CTSvars import * class Environment(object): def __init__(self, args): self.data = {} self.Nodes = [] self["DeadTime"] = 300 self["StartTime"] = 300 self["StableTime"] = 30 self["tests"] = [] self["IPagent"] = "IPaddr2" self["DoStandby"] = 1 self["DoFencing"] = 1 self["XmitLoss"] = "0.0" self["RecvLoss"] = "0.0" self["ClobberCIB"] = 0 self["CIBfilename"] = None self["CIBResource"] = 0 self["DoBSC"] = 0 self["oprofile"] = [] self["warn-inactive"] = 0 self["ListTests"] = 0 self["benchmark"] = 0 self["LogWatcher"] = "any" self["SyslogFacility"] = "daemon" self["LogFileName"] = "/var/log/messages" self["Schema"] = "pacemaker-3.0" self["Stack"] = "corosync" self["stonith-type"] = "external/ssh" self["stonith-params"] = "hostlist=all,livedangerously=yes" self["notification-agent"] = "/var/lib/pacemaker/notify.sh" self["notification-recipient"] = "/var/lib/pacemaker/notify.log" self["loop-minutes"] = 60 self["valgrind-procs"] = "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd" self["experimental-tests"] = 0 self["container-tests"] = 0 self["valgrind-tests"] = 0 self["unsafe-tests"] = 1 self["loop-tests"] = 1 self["scenario"] = "random" self["stats"] = 0 self["docker"] = 0 self["continue"] = 0 self.RandomGen = random.Random() self.logger = LogFactory() self.SeedRandom() self.rsh = RemoteFactory().getInstance() self.target = "localhost" self.parse_args(args) if self["ListTests"] == 0: self.validate() self.discover() def SeedRandom(self, seed=None): if not seed: seed = int(time.time()) self["RandSeed"] = seed self.RandomGen.seed(str(seed)) def dump(self): keys = [] for key in list(self.data.keys()): keys.append(key) keys.sort() for key in keys: self.logger.debug("Environment["+key+"]:\t"+str(self[key])) def 
keys(self): return list(self.data.keys()) def has_key(self, key): if key == "nodes": return True return key in self.data def __getitem__(self, key): if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead") if key == "nodes": return self.Nodes elif key == "Name": return self.get_stack_short() elif key in self.data: return self.data[key] else: return None def __setitem__(self, key, value): if key == "Stack": self.set_stack(value) elif key == "node-limit": self.data[key] = value self.filter_nodes() elif key == "nodes": self.Nodes = [] for node in value: # I don't think I need the IP address, etc. but this validates # the node name against /etc/hosts and/or DNS, so it's a # GoodThing(tm). try: n = node.strip() if self.data["docker"] == 0: socket.gethostbyname_ex(n) self.Nodes.append(n) except: self.logger.log(node+" not found in DNS... aborting") raise self.filter_nodes() else: self.data[key] = value def RandomNode(self): '''Choose a random node from the cluster''' return self.RandomGen.choice(self["nodes"]) def set_stack(self, name): # Normalize stack names if name == "corosync" or name == "cs" or name == "mcp": self.data["Stack"] = "corosync 2+" else: raise ValueError("Unknown stack: "+name) def get_stack_short(self): # Create the Cluster Manager object if not "Stack" in self.data: return "unknown" elif self.data["Stack"] == "corosync 2+": if self["docker"]: return "crm-corosync-docker" else: return "crm-corosync" else: LogFactory().log("Unknown stack: "+self["stack"]) raise ValueError("Unknown stack: "+self["stack"]) def detect_syslog(self): # Detect syslog variant if not "syslogd" in self.data: if self["have_systemd"]: # Systemd self["syslogd"] = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1).strip() else: # SYS-V self["syslogd"] = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1).strip() if not "syslogd" in self.data or not self["syslogd"]: # default self["syslogd"] = "rsyslog" def disable_service(self, node, service): if self["have_systemd"]: # Systemd return self.rsh(node, "systemctl disable %s" % service) else: # SYS-V return self.rsh(node, "chkconfig %s off" % service) def enable_service(self, node, service): if self["have_systemd"]: # Systemd return self.rsh(node, "systemctl enable %s" % service) else: # SYS-V return self.rsh(node, "chkconfig %s on" % service) def service_is_enabled(self, node, service): if self["have_systemd"]: # Systemd # With "systemctl is-enabled", we should check if the service is # explicitly "enabled" instead of the return code. For example it returns # 0 if the service is "static" or "indirect", but they don't really count # as "enabled". 
return not self.rsh(node, "systemctl is-enabled %s | grep enabled" % service) else: # SYS-V return not self.rsh(node, "chkconfig --list | grep -e %s.*on" % service) def detect_at_boot(self): # Detect if the cluster starts at boot if not "at-boot" in self.data: self["at-boot"] = self.service_is_enabled(self.target, "corosync") \ or self.service_is_enabled(self.target, "pacemaker") def detect_ip_offset(self): # Try to determine an offset for IPaddr resources if self["CIBResource"] and not "IPBase" in self.data: network=self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip() self["IPBase"] = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, stdout=1).strip() if not self["IPBase"]: self["IPBase"] = " fe80::1234:56:7890:1000" self.logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.") self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"]) elif int(self["IPBase"].split('.')[3]) >= 240: self.logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s" % (self["IPBase"], self["IPBase"].split('.')[3])) self["IPBase"] = " fe80::1234:56:7890:1000" self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"]) def filter_nodes(self): if self['node-limit'] is not None and self["node-limit"] > 0: if len(self["nodes"]) > self["node-limit"]: self.logger.log("Limiting the number of nodes configured=%d (max=%d)" %(len(self["nodes"]), self["node-limit"])) while len(self["nodes"]) > self["node-limit"]: self["nodes"].pop(len(self["nodes"])-1) def validate(self): if len(self["nodes"]) < 1: print("No nodes specified!") sys.exit(1) def discover(self): self.target = random.Random().choice(self["nodes"]) exerciser = socket.gethostname() # Use the IP where possible to avoid name lookup failures for ip in socket.gethostbyname_ex(exerciser)[2]: if ip != "127.0.0.1": exerciser = ip break; self["cts-exerciser"] = exerciser if not "have_systemd" in self.data: self["have_systemd"] = not self.rsh(self.target, "systemctl list-units", silent=True) self.detect_syslog() self.detect_at_boot() self.detect_ip_offset() def parse_args(self, args): skipthis=None if not args: args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == "-l" or args[i] == "--limit-nodes": skipthis=1 self["node-limit"] = int(args[i+1]) elif args[i] == "-r" or args[i] == "--populate-resources": self["CIBResource"] = 1 self["ClobberCIB"] = 1 elif args[i] == "--outputfile": skipthis=1 self["OutputFile"] = args[i+1] LogFactory().add_file(self["OutputFile"]) elif args[i] == "-L" or args[i] == "--logfile": skipthis=1 self["LogWatcher"] = "remote" self["LogAuditDisabled"] = 1 self["LogFileName"] = args[i+1] elif args[i] == "--ip" or args[i] == "--test-ip-base": skipthis=1 self["IPBase"] = args[i+1] self["CIBResource"] = 1 self["ClobberCIB"] = 1 elif args[i] == "--oprofile": skipthis=1 self["oprofile"] = args[i+1].split(' ') elif args[i] == "--trunc": self["TruncateLog"]=1 elif args[i] == "--list-tests" or args[i] == "--list" : self["ListTests"]=1 elif args[i] == "--benchmark": self["benchmark"]=1 elif args[i] == "--bsc": self["DoBSC"] = 1 self["scenario"] = "basic-sanity" elif args[i] == "--qarsh": RemoteFactory().enable_qarsh() elif args[i] == "--docker": self["docker"] = 1 RemoteFactory().enable_docker() elif args[i] 
== "--yes" or args[i] == "-y": self["continue"] = 1 elif args[i] == "--stonith" or args[i] == "--fencing": skipthis=1 if args[i+1] == "1" or args[i+1] == "yes": self["DoFencing"]=1 elif args[i+1] == "0" or args[i+1] == "no": self["DoFencing"]=0 elif args[i+1] == "phd": self["DoStonith"]=1 self["stonith-type"] = "fence_phd_kvm" elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt": self["DoStonith"]=1 self["stonith-type"] = "fence_xvm" elif args[i+1] == "docker": self["DoStonith"]=1 self["stonith-type"] = "fence_docker_cts" elif args[i+1] == "scsi": self["DoStonith"]=1 self["stonith-type"] = "fence_scsi" elif args[i+1] == "ssh" or args[i+1] == "lha": self["DoStonith"]=1 self["stonith-type"] = "external/ssh" self["stonith-params"] = "hostlist=all,livedangerously=yes" elif args[i+1] == "north": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;" elif args[i+1] == "south": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;" elif args[i+1] == "east": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;" elif args[i+1] == "west": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;" elif args[i+1] == "openstack": self["DoStonith"]=1 self["stonith-type"] = "fence_openstack" print("Obtaining OpenStack credentials from the current environment") self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % ( os.environ['OS_REGION_NAME'], os.environ['OS_TENANT_NAME'], os.environ['OS_AUTH_URL'], os.environ['OS_USERNAME'], os.environ['OS_PASSWORD'] ) elif args[i+1] == "rhevm": self["DoStonith"]=1 self["stonith-type"] = "fence_rhevm" print("Obtaining RHEV-M credentials from the current environment") self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( os.environ['RHEVM_USERNAME'], os.environ['RHEVM_PASSWORD'], os.environ['RHEVM_SERVER'], os.environ['RHEVM_PORT'], ) else: self.usage(args[i+1]) elif args[i] == "--stonith-type": self["stonith-type"] = args[i+1] skipthis=1 elif args[i] == "--stonith-args": self["stonith-params"] = args[i+1] skipthis=1 elif args[i] == "--standby": skipthis=1 if args[i+1] == "1" or args[i+1] == "yes": self["DoStandby"] = 1 elif args[i+1] == "0" or args[i+1] == "no": self["DoStandby"] = 0 else: self.usage(args[i+1]) elif args[i] == "--clobber-cib" or args[i] == "-c": self["ClobberCIB"] = 1 elif args[i] == "--cib-filename": skipthis=1 self["CIBfilename"] = args[i+1] elif args[i] == "--xmit-loss": try: float(args[i+1]) except ValueError: print("--xmit-loss parameter should be float") 
self.usage(args[i+1]) skipthis=1 self["XmitLoss"] = args[i+1] elif args[i] == "--recv-loss": try: float(args[i+1]) except ValueError: print("--recv-loss parameter should be float") self.usage(args[i+1]) skipthis=1 self["RecvLoss"] = args[i+1] elif args[i] == "--choose": skipthis=1 self["tests"].append(args[i+1]) self["scenario"] = "sequence" elif args[i] == "--nodes": skipthis=1 self["nodes"] = args[i+1].split(' ') elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group": skipthis=1 self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1]) LogFactory().add_file(self["OutputFile"], "CTS") dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1]) # Hacks to make my life easier if args[i+1] == "virt1": self["Stack"] = "corosync" self["DoStonith"]=1 self["stonith-type"] = "fence_xvm" self["stonith-params"] = "delay=0" self["IPBase"] = " fe80::1234:56:7890:1000" elif args[i+1] == "east16" or args[i+1] == "nsew": self["Stack"] = "corosync" self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;" self["IPBase"] = " fe80::1234:56:7890:2000" if args[i+1] == "east16": # Requires newer python than available via nsew self["IPagent"] = "Dummy" elif args[i+1] == "corosync8": self["Stack"] = "corosync" self["DoStonith"]=1 self["stonith-type"] = "fence_rhevm" print("Obtaining RHEV-M credentials from the current environment") self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( os.environ['RHEVM_USERNAME'], os.environ['RHEVM_PASSWORD'], os.environ['RHEVM_SERVER'], os.environ['RHEVM_PORT'], ) self["IPBase"] = " fe80::1234:56:7890:3000" if os.path.isfile(dsh_file): self["nodes"] = [] f = open(dsh_file, 'r') for line in f: l = line.strip().rstrip() if not l.startswith('#'): self["nodes"].append(l) f.close() else: print("Unknown DSH group: %s" % args[i+1]) elif args[i] == "--syslog-facility" or args[i] == "--facility": skipthis=1 self["SyslogFacility"] = args[i+1] elif args[i] == "--seed": skipthis=1 self.SeedRandom(args[i+1]) elif args[i] == "--warn-inactive": self["warn-inactive"] = 1 elif args[i] == "--schema": skipthis=1 self["Schema"] = args[i+1] elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot": skipthis=1 if args[i+1] == "1" or args[i+1] == "yes": self["at-boot"] = 1 elif args[i+1] == "0" or args[i+1] == "no": self["at-boot"] = 0 else: self.usage(args[i+1]) elif args[i] == "--stack": if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18": self["Stack"] = "corosync" elif args[i+1] == "rhel-7": self["Stack"] = "corosync" else: self["Stack"] = args[i+1] skipthis=1 elif args[i] == "--once": self["scenario"] = "all-once" elif args[i] == "--boot": self["scenario"] = "boot" elif args[i] == "--notification-agent": self["notification-agent"] = args[i+1] skipthis = 1 elif args[i] == "--notification-recipient": self["notification-recipient"] = args[i+1] skipthis = 1 elif args[i] == "--valgrind-tests": self["valgrind-tests"] = 1 elif args[i] == "--valgrind-procs": self["valgrind-procs"] = args[i+1] skipthis = 1 elif args[i] == "--no-loop-tests": self["loop-tests"] = 0 elif args[i] == "--loop-minutes": skipthis=1 try: self["loop-minutes"]=int(args[i+1]) except ValueError: self.usage(args[i]) elif args[i] == "--no-unsafe-tests": self["unsafe-tests"] = 0 
elif args[i] == "--experimental-tests": self["experimental-tests"] = 1 elif args[i] == "--container-tests": self["container-tests"] = 1 elif args[i] == "--set": skipthis=1 (name, value) = args[i+1].split('=') self[name] = value print("Setting %s = %s" % (name, value)) elif args[i] == "--help": self.usage(args[i], 0) elif args[i] == "--": break else: try: NumIter=int(args[i]) self["iterations"] = NumIter except ValueError: self.usage(args[i]) def usage(self, arg, status=1): if status: print("Illegal argument %s" % arg) print("usage: " + sys.argv[0] +" [options] number-of-iterations") print("\nCommon options: ") print("\t [--nodes 'node list'] list of cluster nodes separated by whitespace") print("\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)") print("\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes") print("\t [--stack corosync] which cluster stack is installed") print("\t [--list-tests] list the valid tests") print("\t [--benchmark] add the timing information") print("\t ") print("Options that CTS will usually auto-detect correctly: ") print("\t [--logfile path] where should the test software look for logs from cluster nodes") print("\t [--syslog-facility name] which syslog facility should the test software log to") print("\t [--at-boot (1|0)] does the cluster software start at boot time") print("\t [--test-ip-base ip] offset for generated IP address resources") print("\t ") print("Options for release testing: ") print("\t [--populate-resources | -r] generate a sample configuration") print("\t [--choose name] run only the named test") print("\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]") print("\t [--once] run all valid tests once") print("\t ") print("Additional (less common) options: ") print("\t [--clobber-cib | -c ] erase any existing configuration") print("\t [--outputfile path] optional location for the test software to write logs to") print("\t [--trunc] truncate logfile before starting") print("\t [--xmit-loss lost-rate(0.0-1.0)]") print("\t [--recv-loss lost-rate(0.0-1.0)]") print("\t [--standby (1 | 0 | yes | no)]") print("\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]") print("\t [--stonith-type type]") print("\t [--stonith-args name=value]") print("\t [--bsc]") print("\t [--notification-agent path] script to configure for Pacemaker alerts") print("\t [--notification-recipient r] recipient to pass to alert script") print("\t [--no-loop-tests] don't run looping/time-based tests") print("\t [--no-unsafe-tests] don't run tests that are unsafe for use with ocfs2/drbd") print("\t [--valgrind-tests] include tests using valgrind") print("\t [--experimental-tests] include experimental tests") print("\t [--container-tests] include pacemaker_remote tests that run in lxc container resources") print("\t [--oprofile 'node list'] list of cluster nodes to run oprofile on]") print("\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH") print("\t [--docker] Indicates nodes are docker nodes.") print("\t [--seed random_seed]") print("\t [--set option=value]") print("\t [--yes | -y] continue to run cts when there is an interaction whether to continue running pacemaker-cts") print("\t ") print("\t Example: ") # @PYTHON@ would be better here but not worth making file this a .in print("\t python sys.argv[0] -g virt1 -r --stonith ssh --schema pacemaker-2.0 500") sys.exit(status) class EnvFactory(object): instance = None def __init__(self): pass def getInstance(self, args=None): if not 
EnvFactory.instance: EnvFactory.instance = Environment(args) return EnvFactory.instance diff --git a/cts/fence_dummy.in b/cts/fence_dummy.in index 8b0dd51659..00552fea71 100644 --- a/cts/fence_dummy.in +++ b/cts/fence_dummy.in @@ -1,499 +1,496 @@ #!@PYTHON@ """Dummy fence agent for testing """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2012-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2012-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import time import random import atexit import getopt AGENT_VERSION = "4.0.1" OCF_VERSION = "1.0" SHORT_DESC = "Dummy fence agent" LONG_DESC = """fence_dummy is a fake fencing agent which reports success based on its mode (pass|fail|random) without doing anything.""" # Short options used: difhmnoqsvBDHMRUV ALL_OPT = { "quiet" : { "getopt" : "q", "help" : "", "order" : 50 }, "verbose" : { "getopt" : "v", "longopt" : "verbose", "help" : "-v, --verbose Verbose mode", "required" : "0", "shortdesc" : "Verbose mode", "order" : 51 }, "debug" : { "getopt" : "D:", "longopt" : "debug-file", "help" : "-D, --debug-file=[debugfile] Debugging to output file", "required" : "0", "shortdesc" : "Write debug information to given file", "order" : 52 }, "version" : { "getopt" : "V", "longopt" : "version", "help" : "-V, --version Display version information and exit", "required" : "0", "shortdesc" : "Display version information and exit", "order" : 53 }, "help" : { "getopt" : "h", "longopt" : "help", "help" : "-h, --help Display this help and exit", "required" : "0", "shortdesc" : "Display help and exit", "order" : 54 }, "action" : { "getopt" : "o:", "longopt" : "action", "help" : "-o, --action=[action] Action: status, list, reboot (default), off or on", "required" : "1", "shortdesc" : "Fencing Action", "default" : "reboot", "order" : 1 }, "nodename" : { "getopt" : "N:", "longopt" : "nodename", "help" : "-N, --nodename Node name of fence victim (ignored)", "required" : "0", "shortdesc" : "The node name of fence victim (ignored)", "order" : 2 }, "mode": { "getopt" : "M:", "longopt" : "mode", "required" : "0", "help" : "-M, --mode=(pass|fail|random) Exit status to return for non-monitor operations", "shortdesc" : "Whether fence operations should always pass, always fail, or fail at random", "order" : 3 }, "monitor_mode" : { "getopt" : "m:", "longopt" : "monitor_mode", "help" : "-m, --monitor_mode=(pass|fail|random) Exit status to return for monitor operations", "required" : "0", "shortdesc" : "Whether monitor operations should always pass, always fail, or fail at random", "order" : 3 }, "random_sleep_range": { "getopt" : "R:", "required" : "0", "longopt" : "random_sleep_range", "help" : "-R, --random_sleep_range=[seconds] Sleep between 1 and [seconds] before returning", "shortdesc" : "Wait randomly between 1 and [seconds]", "order" : 3 }, "mock_dynamic_hosts" : { "getopt" : "H:", "longopt" : "mock_dynamic_hosts", "help" : "-H, --mock_dynamic_hosts=[list] What to return when dynamically queried for possible targets", "required" : "0", "shortdesc" : "A list of hosts we can fence", "order" : 3 }, "delay" : { "getopt" : "f:", "longopt" : "delay", "help" : "-f, --delay [seconds] Wait X seconds before fencing is started", "required" : "0", "shortdesc" : "Wait X seconds before fencing is started", "default" : 
"0", "order" : 3 }, "monitor_delay" : { "getopt" : "d:", "longopt" : "monitor_delay", "help" : "-d, --monitor_delay [seconds] Wait X seconds before monitor completes", "required" : "0", "shortdesc" : "Wait X seconds before monitor completes", "default" : "0", "order" : 3 }, "plug" : { "getopt" : "n:", "longopt" : "plug", "help" : "-n, --plug=[id] Physical plug number on device (ignored)", "required" : "1", "shortdesc" : "Ignored", "order" : 4 }, "port" : { "getopt" : "n:", "longopt" : "plug", "help" : "-n, --plug=[id] Physical plug number on device (ignored)", "required" : "1", "shortdesc" : "Ignored", "order" : 4 }, "switch" : { "getopt" : "s:", "longopt" : "switch", "help" : "-s, --switch=[id] Physical switch number on device (ignored)", "required" : "0", "shortdesc" : "Ignored", "order" : 4 }, "nodeid" : { "getopt" : "i:", "longopt" : "nodeid", "help" : "-i, --nodeid Corosync id of fence victim (ignored)", "required" : "0", "shortdesc" : "Ignored", "order" : 4 }, "uuid" : { "getopt" : "U:", "longopt" : "uuid", "help" : "-U, --uuid UUID of the VM to fence (ignored)", "required" : "0", "shortdesc" : "Ignored", "order" : 4 } } auto_unfence = False no_reboot = False def agent(): """ Return name this file was run as. """ return os.path.basename(sys.argv[0]) def fail_usage(message): """ Print a usage message and exit. """ sys.exit("%s\nPlease use '-h' for usage" % message) def show_docs(options): """ Handle informational options (display info and exit). """ device_opt = options["device_opt"] if "-h" in options: usage(device_opt) sys.exit(0) if "-o" in options and options["-o"].lower() == "metadata": metadata(device_opt, options) sys.exit(0) if "-V" in options: print(AGENT_VERSION) sys.exit(0) def sorted_options(avail_opt): """ Return a list of all options, in their internally specified order. """ sorted_list = [(key, ALL_OPT[key]) for key in avail_opt] sorted_list.sort(key=lambda x: x[1]["order"]) return sorted_list def usage(avail_opt): """ Print a usage message. """ print("Usage:") print("\t" + agent() + " [options]") print("Options:") for dummy, value in sorted_options(avail_opt): if len(value["help"]) != 0: print(" " + value["help"]) def metadata(avail_opt, options): """ Print agent metadata. 
""" # This log is just for testing handling of stderr output print("asked for fence_dummy metadata", file=sys.stderr) print(""" %s %s """ % (agent(), SHORT_DESC, AGENT_VERSION, OCF_VERSION, LONG_DESC)) for option, dummy in sorted_options(avail_opt): if "shortdesc" in ALL_OPT[option]: print(' ') default = "" default_name_arg = "-" + ALL_OPT[option]["getopt"][:-1] default_name_no_arg = "-" + ALL_OPT[option]["getopt"] if "default" in ALL_OPT[option]: default = 'default="%s"' % str(ALL_OPT[option]["default"]) elif default_name_arg in options: if options[default_name_arg]: try: default = 'default="%s"' % options[default_name_arg] except TypeError: ## @todo/@note: Currently there is no clean way how to handle lists ## we can create a string from it but we can't set it on command line default = 'default="%s"' % str(options[default_name_arg]) elif default_name_no_arg in options: default = 'default="true"' mixed = ALL_OPT[option]["help"] ## split it between option and help text res = re.compile(r"^(.*--\S+)\s+", re.IGNORECASE | re.S).search(mixed) if None != res: mixed = res.group(1) mixed = mixed.replace("<", "<").replace(">", ">") print(' ') if ALL_OPT[option]["getopt"].count(":") > 0: print(' ') else: print(' ') print(' ' + ALL_OPT[option]["shortdesc"] + '') print(' ') print(' \n ') if auto_unfence: attr_name = 'automatic' else: attr_name = 'on_target' print(' ') print(' ') if not no_reboot: print(' ') print(' ') print(' ') print(' ') print(' ') print(' ') print('') def option_longopt(option): """ Return the getopt-compatible long-option name of the given option. """ if ALL_OPT[option]["getopt"].endswith(":"): return ALL_OPT[option]["longopt"] + "=" else: return ALL_OPT[option]["longopt"] def opts_from_command_line(argv, avail_opt): """ Read options from command-line arguments. """ # Prepare list of options for getopt getopt_string = "" longopt_list = [] for k in avail_opt: if k in ALL_OPT: getopt_string += ALL_OPT[k]["getopt"] else: fail_usage("Parse error: unknown option '"+k+"'") if k in ALL_OPT and "longopt" in ALL_OPT[k]: longopt_list.append(option_longopt(k)) try: opt, dummy = getopt.gnu_getopt(argv, getopt_string, longopt_list) except getopt.GetoptError as error: fail_usage("Parse error: " + error.msg) # Transform longopt to short one which are used in fencing agents old_opt = opt opt = {} for old_option in dict(old_opt).keys(): if old_option.startswith("--"): for option in ALL_OPT.keys(): if "longopt" in ALL_OPT[option] and "--" + ALL_OPT[option]["longopt"] == old_option: opt["-" + ALL_OPT[option]["getopt"].rstrip(":")] = dict(old_opt)[old_option] else: opt[old_option] = dict(old_opt)[old_option] # Compatibility Layer (with what? probably not needed for fence_dummy) new_opt = dict(opt) if "-T" in new_opt: new_opt["-o"] = "status" if "-n" in new_opt: new_opt["-m"] = new_opt["-n"] opt = new_opt return opt def opts_from_stdin(avail_opt): """ Read options from standard input. """ opt = {} name = "" for line in sys.stdin.readlines(): line = line.strip() if line.startswith("#") or (len(line) == 0): continue (name, value) = (line + "=").split("=", 1) value = value[:-1] # Compatibility Layer (with what? 
probably not needed for fence_dummy) if name == "option": name = "action" if name not in avail_opt: print("Parse error: Ignoring unknown option '%s'" % line, file=sys.stderr) continue if ALL_OPT[name]["getopt"].endswith(":"): opt["-"+ALL_OPT[name]["getopt"].rstrip(":")] = value elif value.lower() in ["1", "yes", "on", "true"]: opt["-"+ALL_OPT[name]["getopt"]] = "1" return opt def process_input(avail_opt): """ Set standard environment variables, and parse all options. """ # Set standard environment os.putenv("LANG", "C") os.putenv("LC_ALL", "C") # Read options from command line or standard input if len(sys.argv) > 1: return opts_from_command_line(sys.argv[1:], avail_opt) else: return opts_from_stdin(avail_opt) def atexit_handler(): """ Close stdout on exit. """ try: sys.stdout.close() os.close(1) except IOError: sys.exit("%s failed to close standard output" % agent()) def success_mode(options, option, default_value): """ Return exit code specified by option. """ if option in options: test_value = options[option] else: test_value = default_value if test_value == "pass": exitcode = 0 elif test_value == "fail": exitcode = 1 else: exitcode = random.randint(0, 1) return exitcode def write_options(options): """ Write out all options to debug file. """ try: debugfile = io.open(options["-D"], 'at') debugfile.write("### %s ###\n" % (time.strftime("%Y-%m-%d %H:%M:%S"))) for option in sorted(options): debugfile.write("%s=%s\n" % (option, options[option])) debugfile.write("###\n") debugfile.close() except IOError: pass def main(): """ Make it so! """ global auto_unfence global no_reboot # Meta-data can't take parameters, so we simulate different meta-data # behavior based on the executable name (which can be a symbolic link). if (sys.argv[0].endswith("_auto_unfence")): auto_unfence = True elif (sys.argv[0].endswith("_no_reboot")): no_reboot = True device_opt = ALL_OPT.keys() ## Defaults for fence agent atexit.register(atexit_handler) options = process_input(device_opt) options["device_opt"] = device_opt show_docs(options) # dump input to file if "-D" in options: write_options(options) if "-f" in options: val = int(options["-f"]) print("delay sleep for %d seconds" % val, file=sys.stderr) time.sleep(val) # random sleep for testing if "-R" in options: val = int(options["-R"]) ran = random.randint(1, val) print("random sleep for %d seconds" % ran, file=sys.stderr) time.sleep(ran) if "-o" in options: action = options["-o"] else: action = "action" if action == "monitor": if "-d" in options: time.sleep(int(options["-d"])) exitcode = success_mode(options, "-m", "pass") elif action == "list": print("fence_dummy action (list) called", file=sys.stderr) if "-H" in options: print(options["-H"]) exitcode = 0 else: print("dynamic hostlist requires mock_dynamic_hosts to be set", file=sys.stderr) exitcode = 1 else: exitcode = success_mode(options, "-M", "random") # Ensure we generate some error output on failure exit. 
if exitcode == 1: print("simulated %s failure" % action, file=sys.stderr) sys.exit(exitcode) if __name__ == "__main__": main() diff --git a/cts/logging.py b/cts/logging.py index bd677371ea..a618b8fc92 100644 --- a/cts/logging.py +++ b/cts/logging.py @@ -1,159 +1,126 @@ """ Logging classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2014-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import sys import time -# Wrapper to detect a string under Python 2 or 3 -try: - _StringType = basestring -except NameError: - _StringType = str - -def _is_string(obj): - """ Return True if obj is a simple string. """ - - return isinstance(obj, _StringType) - - -def _strip(line): - """ Wrapper for strip() that works regardless of Python version """ - - if sys.version_info < (3,): - return line.decode('utf-8').strip() - else: - return line.strip() - - -def _rstrip(line): - """ Wrapper for rstrip() that works regardless of Python version """ - - if sys.version_info < (3,): - return line.decode('utf-8').rstrip() - else: - return line.rstrip() - - class Logger(object): """ Abstract class to use as parent for CTS logging classes """ TimeFormat = "%b %d %H:%M:%S\t" def __init__(self): # Whether this logger should print debug messages self.debug_target = True def __call__(self, lines): """ Log specified messages """ raise ValueError("Abstract class member (__call__)") def write(self, line): """ Log a single line excluding trailing whitespace """ - return self(_rstrip(line)) + return self(line.rstrip()) def writelines(self, lines): """ Log a series of lines excluding trailing whitespace """ for line in lines: self.write(line) return 1 def is_debug_target(self): """ Return True if this logger should receive debug messages """ return self.debug_target class StdErrLog(Logger): """ Class to log to standard error """ def __init__(self, filename, tag): Logger.__init__(self) self.debug_target = False def __call__(self, lines): """ Log specified lines to stderr """ timestamp = time.strftime(Logger.TimeFormat, time.localtime(time.time())) - if _is_string(lines): + if isinstance(lines, str): lines = [lines] for line in lines: print("%s%s" % (timestamp, line), file=sys.__stderr__) sys.__stderr__.flush() class FileLog(Logger): """ Class to log to a file """ def __init__(self, filename, tag): Logger.__init__(self) self.logfile = filename self.hostname = os.uname()[1] if tag: self.source = tag + ": " else: self.source = "" def __call__(self, lines): """ Log specified lines to the file """ logf = io.open(self.logfile, "at") timestamp = time.strftime(Logger.TimeFormat, time.localtime(time.time())) - if _is_string(lines): + if isinstance(lines, str): lines = [lines] for line in lines: print("%s%s %s%s" % (timestamp, self.hostname, self.source, line), file=logf) logf.close() class LogFactory(object): """ Singleton to log messages to various destinations """ log_methods = [] have_stderr = False def add_file(self, filename, tag=None): """ When logging messages, log them to specified file """ if filename: LogFactory.log_methods.append(FileLog(filename, tag)) def add_stderr(self): """ When logging messages, log them to standard error """ if not LogFactory.have_stderr: LogFactory.have_stderr = 
True LogFactory.log_methods.append(StdErrLog(None, None)) def log(self, args): """ Log a message (to all configured log destinations) """ for logfn in LogFactory.log_methods: - logfn(_strip(args)) + logfn(args.strip()) def debug(self, args): """ Log a debug message (to all configured log destinations) """ for logfn in LogFactory.log_methods: if logfn.is_debug_target(): - logfn("debug: %s" % _strip(args)) + logfn("debug: %s" % args.strip()) def traceback(self, traceback): """ Log a stack trace (to all configured log destinations) """ for logfn in LogFactory.log_methods: traceback.print_exc(50, logfn) diff --git a/cts/pacemaker-cts-dummyd.in b/cts/pacemaker-cts-dummyd.in index bde98c5c9f..453a644673 100644 --- a/cts/pacemaker-cts-dummyd.in +++ b/cts/pacemaker-cts-dummyd.in @@ -1,58 +1,55 @@ #!@PYTHON@ """ Slow-starting idle daemon that notifies systemd when it starts """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2014-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys import time import signal import subprocess have_systemd_daemon = True try: import systemd.daemon except ImportError: have_systemd_daemon = False delay = None def parse_args(): global delay # Lone argument is a number of seconds to delay start and stop if len(sys.argv) > 0: try: delay = float(sys.argv[1]) except ValueError: delay = None def twiddle(): global delay if delay is not None: time.sleep(delay) def bye(signum, frame): twiddle() sys.exit(0) if __name__ == "__main__": parse_args() signal.signal(signal.SIGTERM, bye) twiddle() if have_systemd_daemon: systemd.daemon.notify("READY=1") else: subprocess.call(["systemd-notify", "READY=1"]) # This isn't a "proper" daemon, but that would be overkill for testing purposes while True: time.sleep(600.0) diff --git a/cts/patterns.py b/cts/patterns.py index 71e5414c2c..d030b21954 100644 --- a/cts/patterns.py +++ b/cts/patterns.py @@ -1,413 +1,410 @@ """ Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2008-2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2008-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys, os from cts.CTSvars import * patternvariants = {} class BasePatterns(object): def __init__(self, name): self.name = name patternvariants[name] = self self.ignore = [ "avoid confusing Valgrind", # Logging bug in some versions of libvirtd r"libvirtd.*: internal error: Failed to parse PCI config address", # pcs can log this when node is fenced, but fencing is OK in some # tests (and we will catch it in pacemaker logs when not OK) r"pcs.daemon:No response from: .* request: get_configs, error:", ] self.BadNews = [] self.components = {} self.commands = { "StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null", "CibQuery" : "cibadmin -Ql", "CibAddXml" : "cibadmin --modify -c --xml-text %s", "CibDelXpath" : "cibadmin --delete --xpath %s", # 300,000 == 5 minutes "RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s", "CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml", "TmpDir" : 
"/tmp", "BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1", "FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1", # tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit # tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated # tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1 # tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null "ReduceCommCmd" : "", "RestoreCommCmd" : "tc qdisc del dev lo root", "MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''", "MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"", "StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null", "StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null", } self.search = { "Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE", # This won't work if we have multiple partitions "Pat:Local_started" : "%s\W.*controller successfully started", "Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC", "Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE", "Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN", "Pat:They_stopped" : "%s\W.*LOST:.* %s ", "Pat:They_dead" : "node %s.*: is dead", "Pat:TransitionComplete" : "Transition status: Complete: complete", "Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s", "Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK", "Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover %s", "Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)", "Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error", "Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok", "Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ", "Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ", "Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok", "Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK", "Pat:FenceOpOK" : "Operation .* targeting %s using .* returned 0", } def get_component(self, key): if key in self.components: return self.components[key] print("Unknown component '%s' for %s" % (key, self.name)) return [] def get_patterns(self, key): if key == "BadNews": return self.BadNews elif key == "BadNewsIgnore": return self.ignore elif key == "Commands": return self.commands elif key == "Search": return self.search elif key == "Components": return self.components def __getitem__(self, key): if key == "Name": return self.name elif key in self.commands: return self.commands[key] elif key in self.search: return self.search[key] else: print("Unknown template '%s' for %s" % (key, self.name)) return None class crm_corosync(BasePatterns): ''' Patterns for Corosync version 2 cluster manager class ''' def __init__(self, name): BasePatterns.__init__(self, name) self.commands.update({ "StartCmd" : "service corosync start && service pacemaker start", "StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop", "EpochCmd" : "crm_node -e", "QuorumCmd" : "crm_node -q", "PartitionCmd" : "crm_node -p", }) self.search.update({ # Close enough ... 
"Corosync Cluster Engine exiting normally" isn't # printed reliably. "Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines", "Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost", "Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost", "Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(", # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes() "Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)", "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s", "Pat:InfraUp" : "%s\W.*corosync.*Initializing transport", "Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker", }) self.ignore = self.ignore + [ r"crm_mon:", r"crmadmin:", r"update_trace_data", r"async_notify:.*strange, client not found", r"Parse error: Ignoring unknown option .*nodename", r"error.*: Operation 'reboot' .* using FencingFail returned ", r"getinfo response error: 1$", r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE", r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)", ] self.BadNews = [ r"[^(]error:", r"crit:", r"ERROR:", r"CRIT:", r"Shutting down...NOW", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting", r"schedulerd.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r":global_timer_callback", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)", r"pacemaker-schedulerd.*Recover .*\(.* -\> .*\)", r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting", r"Peer is not part of our cluster", r"We appear to be in an election loop", r"Unknown node -> we will not deliver message", r"(Blackbox dump requested|Problem detected)", r"pacemakerd.*Could not connect to Cluster Configuration Database API", r"Receiving messages from a node we think is dead", r"share the same cluster nodeid", r"share the same name", #r"crm_ipc_send:.*Request .* failed", #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received", # Not inherently bad, but worth tracking #r"No need to invoke the TE", #r"ping.*: DEBUG: Updated connected = 0", #r"Digest mis-match:", r"pacemaker-controld:.*Transition failed: terminated", r"Local CIB .* differs from .*:", r"warn.*:\s*Continuing but .* will NOT be used", r"warn.*:\s*Cluster configuration file .* is corrupt", #r"Executing .* fencing operation", r"Election storm", r"stalled the FSA with pending inputs", ] self.components["common-ignore"] = [ r"Pending action:", r"resource( was|s were) active at shutdown", r"pending LRM operations at shutdown", r"Lost connection to the CIB manager", r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported", r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ", r".*:\s*Requesting fencing \([^)]+\) of node ", r"(Blackbox dump requested|Problem detected)", ] self.components["corosync-ignore"] = [ r"error:.*Connection to the CPG API failed: Library error", r"\[[0-9]+\] exited with status [0-9]+ \(", r"pacemaker-based.*error:.*Corosync connection 
lost", r"pacemaker-fenced.*error:.*Corosync connection terminated", r"pacemaker-controld.*State transition .* S_RECOVERY", r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state", r"pacemaker-controld.*error:.*Could not recover from internal error", r"error:.*Connection to cib_(shm|rw).* (failed|closed)", r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"crit: Fencing daemon connection failed", # This is overbroad, but we don't have a way to say that only # certain transition errors are acceptable (if the fencer respawns, # fence devices may appear multiply active). We have to rely on # other causes of a transition error logging their own error # message, which is the usual practice. r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self.components["corosync"] = [ # We expect each daemon to lose its cluster connection. # However, if the CIB manager loses its connection first, # it's possible for another daemon to lose that connection and # exit before losing the cluster connection. r"pacemakerd.*:\s*(crit|error):.*Lost connection to cluster layer", r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer", r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"schedulerd.*Scheduling Node .* for STONITH", r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK", ] self.components["pacemaker-based"] = [ r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102", r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1", r"pacemakerd.* Respawning failed child process: pacemaker-attrd", r"pacemakerd.* Respawning failed child process: pacemaker-based", r"pacemakerd.* Respawning failed child process: pacemaker-controld", r"pacemakerd.* Respawning failed child process: pacemaker-fenced", r"pacemaker-.* Connection to cib_.* (failed|closed)", r"pacemaker-attrd.*:.*Lost connection to the CIB manager", r"pacemaker-controld.*:.*Lost connection to the CIB manager", r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy", r"pacemaker-controld.* State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", ] self.components["pacemaker-based-ignore"] = [ r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", # This is overbroad, but we don't have a way to say that only # certain transition errors are acceptable (if the fencer respawns, # fence devices may appear multiply active). We have to rely on # other causes of a transition error logging their own error # message, which is the usual practice. 
r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self.components["pacemaker-execd"] = [ r"pacemaker-controld.*Connection to (pacemaker-execd|lrmd|executor) (failed|closed)", r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy", r"pacemaker-controld.*State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1", r"pacemakerd.*Respawning failed child process: pacemaker-execd", r"pacemakerd.*Respawning failed child process: pacemaker-controld", ] self.components["pacemaker-execd-ignore"] = [ r"pacemaker-attrd.*Connection to lrmd (failed|closed)", r"pacemaker-(attrd|controld).*Could not execute alert", ] self.components["pacemaker-controld"] = [ # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # Only if the node wasn't the DC: "State transition S_IDLE", "State transition .* -> S_IDLE", ] self.components["pacemaker-controld-ignore"] = [] self.components["pacemaker-attrd"] = [] self.components["pacemaker-attrd-ignore"] = [] self.components["pacemaker-schedulerd"] = [ "State transition .* S_RECOVERY", r"Respawning failed child process: pacemaker-controld", r"pacemaker-controld\[[0-9]+\] exited with status 1 \(", "Connection to pengine failed", "Connection to pengine.* closed", r"Connection to the scheduler failed", "pacemaker-controld.*I_ERROR.*save_cib_contents", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", "pacemaker-controld.*Could not recover from internal error", ] self.components["pacemaker-schedulerd-ignore"] = [] self.components["pacemaker-fenced"] = [ r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"Fencing daemon connection failed", r"pacemaker-controld.*Fencer successfully connected", ] self.components["pacemaker-fenced-ignore"] = [ r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"crit:.*Fencing daemon connection failed", r"error:.*Fencer connection failed \(will retry\)", r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations", r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error", # This is overbroad, but we don't have a way to say that only # certain transition errors are acceptable (if the fencer respawns, # fence devices may appear multiply active). We have to rely on # other causes of a transition error logging their own error # message, which is the usual practice. 
r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"]) class crm_corosync_docker(crm_corosync): ''' Patterns for Corosync version 2 cluster manager class ''' def __init__(self, name): crm_corosync.__init__(self, name) self.commands.update({ "StartCmd" : "pcmk_start", "StopCmd" : "pcmk_stop", }) class PatternSelector(object): def __init__(self, name=None): self.name = name self.base = BasePatterns("crm-base") if not name: crm_corosync("crm-corosync") elif name == "crm-corosync": crm_corosync(name) elif name == "crm-corosync-docker": crm_corosync_docker(name) def get_variant(self, variant): if variant in patternvariants: return patternvariants[variant] print("defaulting to crm-base for %s" % variant) return self.base def get_patterns(self, variant, kind): return self.get_variant(variant).get_patterns(kind) def get_template(self, variant, key): v = self.get_variant(variant) return v[key] def get_component(self, variant, kind): return self.get_variant(variant).get_component(kind) def __getitem__(self, key): return self.get_template(self.name, key) # python cts/CTSpatt.py -k crm-corosync -t StartCmd if __name__ == '__main__': pdir=os.path.dirname(sys.path[0]) sys.path.insert(0, pdir) # So that things work from the source directory kind=None template=None skipthis=None args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == "-k" or args[i] == "--kind": skipthis=1 kind = args[i+1] elif args[i] == "-t" or args[i] == "--template": skipthis=1 template = args[i+1] else: print("Illegal argument " + args[i]) print(PatternSelector(kind)[template]) diff --git a/cts/remote.py b/cts/remote.py index 0aa8ed93a1..35c741c1e0 100644 --- a/cts/remote.py +++ b/cts/remote.py @@ -1,295 +1,292 @@ """ Remote command runner for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2014-2018 the Pacemaker project contributors" +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import re import os import sys from subprocess import Popen,PIPE from threading import Thread pdir=os.path.dirname(sys.path[0]) sys.path.insert(0, pdir) # So that things work from the source directory from cts.CTSvars import * from cts.logging import * trace_rsh=None trace_lw=None def convert2string(lines): if sys.version_info < (3, ): return lines if isinstance(lines, bytes): return lines.decode("utf-8") elif isinstance(lines, list): aList = [] for line in lines: if isinstance(line, bytes): line = line.decode("utf-8") aList.append(line) return aList return lines def input_wrapper(data): if sys.version_info > (3,): return input(data) return raw_input(data) class AsyncWaitProc(Thread): def __init__(self, proc, node, command, completionDelegate=None): self.proc = proc self.node = node self.command = command self.logger = LogFactory() self.delegate = completionDelegate; Thread.__init__(self) def run(self): outLines = None errLines = None self.logger.debug("cmd: async: target=%s, pid=%d: %s" % (self.node, self.proc.pid, self.command)) self.proc.wait() self.logger.debug("cmd: pid %d returned %d" % (self.proc.pid, self.proc.returncode)) if self.proc.stderr: errLines = self.proc.stderr.readlines() self.proc.stderr.close() for line in errLines: 
self.logger.debug("cmd: stderr[%d]: %s" % (self.proc.pid, line)) errLines = convert2string(errLines) if self.proc.stdout: outLines = self.proc.stdout.readlines() self.proc.stdout.close() outLines = convert2string(outLines) # for line in outLines: # self.logger.debug("cmd: stdout[%d]: %s" % (self.proc.pid, line)) if self.delegate: self.delegate.async_complete(self.proc.pid, self.proc.returncode, outLines, errLines) class AsyncRemoteCmd(Thread): def __init__(self, node, command, completionDelegate=None): self.proc = None self.node = node self.command = command self.logger = LogFactory() self.delegate = completionDelegate; Thread.__init__(self) def run(self): outLines = None errLines = None self.proc = Popen(self.command, stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) self.logger.debug("cmd: async: target=%s, pid=%d: %s" % (self.node, self.proc.pid, self.command)) self.proc.wait() self.logger.debug("cmd: pid %d returned %d to %s" % (self.proc.pid, self.proc.returncode, repr(self.delegate))) if self.proc.stderr: errLines = self.proc.stderr.readlines() self.proc.stderr.close() for line in errLines: self.logger.debug("cmd: stderr[%d]: %s" % (self.proc.pid, line)) errLines = convert2string(errLines) if self.proc.stdout: outLines = self.proc.stdout.readlines() self.proc.stdout.close() outLines = convert2string(outLines) # for line in outLines: # self.logger.log("cmd: stdout[%d]: %s" % (self.proc.pid, line)) if self.delegate: self.delegate.async_complete(self.proc.pid, self.proc.returncode, outLines, errLines) class RemotePrimitives(object): def __init__(self, Command=None, CpCommand=None): if CpCommand: self.CpCommand = CpCommand else: # -B: batch mode, -q: no stats (quiet) self.CpCommand = "scp -B -q" if Command: self.Command = Command else: # -n: no stdin, -x: no X11, # -o ServerAliveInterval=5 disconnect after 3*5s if the server stops responding self.Command = "ssh -l root -n -x -o ServerAliveInterval=5 -o ConnectTimeout=10 -o TCPKeepAlive=yes -o ServerAliveCountMax=3 " class RemoteExec(object): '''This is an abstract remote execution class. It runs a command on another machine - somehow. The somehow is up to us. This particular class uses ssh. Most of the work is done by fork/exec of ssh or scp. 
''' def __init__(self, rsh, silent=False): self.rsh = rsh self.silent = silent self.logger = LogFactory() if trace_rsh: self.silent = False self.OurNode=os.uname()[1].lower() def _fixcmd(self, cmd): return re.sub("\'", "'\\''", cmd) def _cmd(self, *args): '''Compute the string that will run the given command on the given remote system''' args= args[0] sysname = args[0] command = args[1] #print("sysname: %s, us: %s" % (sysname, self.OurNode)) if sysname == None or sysname.lower() == self.OurNode or sysname == "localhost": ret = command else: ret = self.rsh.Command + " " + sysname + " '" + self._fixcmd(command) + "'" #print ("About to run %s\n" % ret) return ret def log(self, args): if not self.silent: self.logger.log(args) def debug(self, args): if not self.silent: self.logger.debug(args) def call_async(self, node, command, completionDelegate=None): #if completionDelegate: print("Waiting for %d on %s: %s" % (proc.pid, node, command)) aproc = AsyncRemoteCmd(node, self._cmd([node, command]), completionDelegate=completionDelegate) aproc.start() return aproc def __call__(self, node, command, stdout=0, synchronous=1, silent=False, blocking=True, completionDelegate=None): '''Run the given command on the given remote system If you call this class like a function, this is the function that gets called. It just runs it roughly as though it were a system() call on the remote machine. The first argument is name of the machine to run it on. ''' if trace_rsh: silent = False rc = 0 result = None proc = Popen(self._cmd([node, command]), stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) #if completionDelegate: print("Waiting for %d on %s: %s" % (proc.pid, node, command)) if not synchronous and proc.pid > 0 and not self.silent: aproc = AsyncWaitProc(proc, node, command, completionDelegate=completionDelegate) aproc.start() return 0 #if not blocking: # import fcntl # fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) if proc.stdout: if stdout == 1: result = proc.stdout.readline() else: result = proc.stdout.readlines() proc.stdout.close() else: self.log("No stdout stream") rc = proc.wait() if not silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command)) result = convert2string(result) if proc.stderr: errors = proc.stderr.readlines() proc.stderr.close() if stdout == 1: return result if completionDelegate: completionDelegate.async_complete(proc.pid, proc.returncode, result, errors) if not silent: for err in errors: self.debug("cmd: stderr: %s" % err) if stdout == 0: if not silent and result: for line in result: self.debug("cmd: stdout: %s" % line) return rc return (rc, result) def cp(self, source, target, silent=False): '''Perform a remote copy''' cpstring = self.rsh.CpCommand + " \'" + source + "\'" + " \'" + target + "\'" rc = os.system(cpstring) if trace_rsh: silent = False if not silent: self.debug("cmd: rc=%d: %s" % (rc, cpstring)) return rc def exists_on_all(self, filename, hosts, test="r"): """ Return True if specified file exists on all specified hosts. 
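For example (file path and host names are hypothetical), the following runs "test -r" on each host and returns False as soon as one of them fails:

    rsh.exists_on_all("/etc/corosync/corosync.conf", ["node1", "node2"])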
""" for host in hosts: rc = self(host, "test -%s %s" % (test, filename)) if rc != 0: return False return True class RemoteFactory(object): # Class variables rsh = RemotePrimitives() instance = None def getInstance(self): if not RemoteFactory.instance: RemoteFactory.instance = RemoteExec(RemoteFactory.rsh, False) return RemoteFactory.instance def new(self, silent=False): return RemoteExec(RemoteFactory.rsh, silent) def enable_docker(self): print("Using DOCKER backend for connections to cluster nodes") RemoteFactory.rsh.Command = "/usr/libexec/phd/docker/phd_docker_remote_cmd " RemoteFactory.rsh.CpCommand = "/usr/libexec/phd/docker/phd_docker_cp" def enable_qarsh(self): # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/ print("Using QARSH for connections to cluster nodes") RemoteFactory.rsh.Command = "qarsh -t 300 -l root" RemoteFactory.rsh.CpCommand = "qacp -q" diff --git a/cts/watcher.py b/cts/watcher.py index 6888a63f56..9fac030fc7 100644 --- a/cts/watcher.py +++ b/cts/watcher.py @@ -1,436 +1,433 @@ """ Log searching classes for Pacemaker's Cluster Test Suite (CTS) """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2014-2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2014-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import re import os import time import threading from cts.remote import * from cts.logging import * log_watcher_bin = CTSvars.CRM_DAEMON_DIR + "/cts-log-watcher" class SearchObj(object): def __init__(self, filename, host=None, name=None): self.limit = None self.cache = [] self.logger = LogFactory() self.host = host self.name = name self.filename = filename self.rsh = RemoteFactory().getInstance() self.offset = "EOF" if host == None: self.host = "localhost" def __str__(self): if self.host: return "%s:%s" % (self.host, self.filename) return self.filename def log(self, args): message = "lw: %s: %s" % (self, args) self.logger.log(message) def debug(self, args): message = "lw: %s: %s" % (self, args) self.logger.debug(message) def harvest(self, delegate=None): async_task = self.harvest_async(delegate) async_task.join() def harvest_async(self, delegate=None): self.log("Not implemented") raise def end(self): self.debug("Unsetting the limit") # Unset the limit self.limit = None class FileObj(SearchObj): def __init__(self, filename, host=None, name=None): SearchObj.__init__(self, filename, host, name) self.harvest() def async_complete(self, pid, returncode, outLines, errLines): for line in outLines: match = re.search("^CTSwatcher:Last read: (\d+)", line) if match: self.offset = match.group(1) self.debug("Got %d lines, new offset: %s %s" % (len(outLines), self.offset, repr(self.delegate))) elif re.search("^CTSwatcher:.*truncated", line): self.log(line) elif re.search("^CTSwatcher:", line): self.debug("Got control line: "+ line) else: self.cache.append(line) if self.delegate: self.delegate.async_complete(pid, returncode, self.cache, errLines) def harvest_async(self, delegate=None): self.delegate = delegate self.cache = [] if (self.limit is not None) and (self.offset == "EOF" or int(self.offset) > self.limit): if self.delegate: self.delegate.async_complete(-1, -1, [], []) return None global log_watcher_bin return self.rsh.call_async(self.host, "%s -t %s -p CTSwatcher: -l 200 -f %s -o %s" % (log_watcher_bin, self.name, self.filename, self.offset), 
completionDelegate=self) def setend(self): if self.limit: return global log_watcher_bin (rc, lines) = self.rsh(self.host, "%s -t %s -p CTSwatcher: -l 2 -f %s -o %s" % (log_watcher_bin, self.name, self.filename, "EOF"), None, silent=True) for line in lines: match = re.search("^CTSwatcher:Last read: (\d+)", line) if match: self.limit = int(match.group(1)) self.debug("Set limit to: %d" % self.limit) return class JournalObj(SearchObj): def __init__(self, host=None, name=None): SearchObj.__init__(self, name, host, name) self.harvest() def async_complete(self, pid, returncode, outLines, errLines): #self.log( "%d returned on %s" % (pid, self.host)) foundCursor = False for line in outLines: match = re.search("^-- cursor: ([^.]+)", line) if match: foundCursor = True self.offset = match.group(1).strip() self.debug("Got %d lines, new cursor: %s" % (len(outLines), self.offset)) else: self.cache.append(line) if self.limit and not foundCursor: self.hitLimit = True self.debug("Got %d lines but no cursor: %s" % (len(outLines), self.offset)) # Get the current cursor (rc, outLines) = self.rsh(self.host, "journalctl -q -n 0 --show-cursor", stdout=None, silent=True, synchronous=True) for line in outLines: match = re.search("^-- cursor: ([^.]+)", line) if match: self.offset = match.group(1).strip() self.debug("Got %d lines, new cursor: %s" % (len(outLines), self.offset)) else: self.log("Not a new cursor: %s" % line) self.cache.append(line) if self.delegate: self.delegate.async_complete(pid, returncode, self.cache, errLines) def harvest_async(self, delegate=None): self.delegate = delegate self.cache = [] # Use --lines to prevent journalctl from overflowing the Popen input buffer if self.limit and self.hitLimit: return None elif self.limit: command = "journalctl -q --after-cursor='%s' --until '%s' --lines=200 --show-cursor" % (self.offset, self.limit) else: command = "journalctl -q --after-cursor='%s' --lines=200 --show-cursor" % (self.offset) if self.offset == "EOF": command = "journalctl -q -n 0 --show-cursor" return self.rsh.call_async(self.host, command, completionDelegate=self) def setend(self): if self.limit: return self.hitLimit = False (rc, lines) = self.rsh(self.host, "date +'%Y-%m-%d %H:%M:%S'", stdout=None, silent=True) if (rc == 0) and (len(lines) == 1): self.limit = lines[0].strip() self.debug("Set limit to: %s" % self.limit) else: self.debug("Unable to set limit for %s because date returned %d lines with status %d" % (self.host, len(lines), rc)) return class LogWatcher(RemoteExec): '''This class watches logs for messages that fit certain regular expressions. Watching logs for events isn't the ideal way to do business, but it's better than nothing :-) On the other hand, this class is really pretty cool ;-) The way you use this class is as follows: Construct a LogWatcher object Call setwatch() when you want to start watching the log Call look() to scan the log looking for the patterns ''' def __init__(self, log, regexes, name="Anon", timeout=10, debug_level=None, silent=False, hosts=None, kind=None): '''This is the constructor for the LogWatcher class. It takes a log name to watch, and a list of regular expressions to watch for." ''' self.logger = LogFactory() self.name = name self.regexes = regexes if debug_level is None: debug_level = 1 self.debug_level = debug_level self.whichmatch = -1 self.unmatched = None self.cache_lock = threading.Lock() self.file_list = [] self.line_cache = [] # Validate our arguments. 
Better sooner than later ;-) for regex in regexes: assert re.compile(regex) if kind: self.kind = kind else: raise #self.kind = self.Env["LogWatcher"] if log: self.filename = log else: raise #self.filename = self.Env["LogFileName"] if hosts: self.hosts = hosts else: raise #self.hosts = self.Env["nodes"] if trace_lw: self.debug_level = 3 silent = False if not silent: for regex in self.regexes: self.debug("Looking for regex: "+regex) self.Timeout = int(timeout) self.returnonlymatch = None def debug(self, args): message = "lw: %s: %s" % (self.name, args) self.logger.debug(message) def setwatch(self): '''Mark the place to start watching the log from. ''' if self.kind == "remote": for node in self.hosts: self.file_list.append(FileObj(self.filename, node, self.name)) elif self.kind == "journal": for node in self.hosts: self.file_list.append(JournalObj(node, self.name)) else: self.file_list.append(FileObj(self.filename)) # print("%s now has %d files" % (self.name, len(self.file_list))) def __del__(self): if self.debug_level > 1: self.debug("Destroy") def ReturnOnlyMatch(self, onlymatch=1): '''Specify one or more subgroups of the match to return rather than the whole string http://www.python.org/doc/2.5.2/lib/match-objects.html ''' self.returnonlymatch = onlymatch def async_complete(self, pid, returncode, outLines, errLines): # TODO: Probably need a lock for updating self.line_cache self.logger.debug("%s: Got %d lines from %d (total %d)" % (self.name, len(outLines), pid, len(self.line_cache))) if len(outLines): self.cache_lock.acquire() self.line_cache.extend(outLines) self.cache_lock.release() def __get_lines(self, timeout): count=0 if not len(self.file_list): raise ValueError("No sources to read from") pending = [] #print("%s waiting for %d operations" % (self.name, self.pending)) for f in self.file_list: t = f.harvest_async(self) if t: pending.append(t) for t in pending: t.join(60.0) if t.isAlive(): self.logger.log("%s: Aborting after 20s waiting for %s logging commands" % (self.name, repr(t))) return #print("Got %d lines" % len(self.line_cache)) def end(self): for f in self.file_list: f.end() def look(self, timeout=None, silent=False): '''Examine the log looking for the given patterns. It starts looking from the place marked by setwatch(). This function looks in the file in the fashion of tail -f. It properly recovers from log file truncation, but not from removing and recreating the log. It would be nice if it recovered from this as well :-) We return the first line which matches any of our patterns. 
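A minimal sketch of that workflow, assuming a hypothetical log path and node name, and a pattern similar to the DC_IDLE pattern used elsewhere in CTS:

    watcher = LogWatcher("/var/log/messages",
                         [r"pacemaker-controld.*State transition.*-> S_IDLE"],
                         name="idle-watch", timeout=60,
                         hosts=["node1"], kind="remote")
    watcher.setwatch()               # mark the place to start watching from
    line = watcher.look()            # first line matching any pattern, or None on timeout
    matches = watcher.lookforall()   # or: wait until every pattern has matched

With kind="remote", each host's file is tailed via the cts-log-watcher helper installed in the Pacemaker daemon directory; kind="journal" reads the systemd journal instead.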
''' if timeout == None: timeout = self.Timeout if trace_lw: silent = False lines=0 needlines=True begin=time.time() end=begin+timeout+1 if self.debug_level > 2: self.debug("starting single search: timeout=%d, begin=%d, end=%d" % (timeout, begin, end)) if not self.regexes: self.debug("Nothing to look for") return None if timeout == 0: for f in self.file_list: f.setend() while True: if len(self.line_cache): lines += 1 self.cache_lock.acquire() line = self.line_cache[0] self.line_cache.remove(line) self.cache_lock.release() which=-1 if re.search("CTS:", line): continue if self.debug_level > 2: self.debug("Processing: "+ line) for regex in self.regexes: which=which+1 if self.debug_level > 3: self.debug("Comparing line to: "+ regex) matchobj = re.search(regex, line) if matchobj: self.whichmatch=which if self.returnonlymatch: return matchobj.group(self.returnonlymatch) else: self.debug("Matched: "+line) if self.debug_level > 1: self.debug("With: "+ regex) return line elif timeout > 0 and end < time.time(): if self.debug_level > 1: self.debug("hit timeout: %d" % timeout) timeout = 0 for f in self.file_list: f.setend() else: self.__get_lines(timeout) if len(self.line_cache) == 0 and end < time.time(): self.debug("Single search terminated: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), lines)) return None else: self.debug("Waiting: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), len(self.line_cache))) time.sleep(1) self.debug("How did we get here") return None def lookforall(self, timeout=None, allow_multiple_matches=None, silent=False): '''Examine the log looking for ALL of the given patterns. It starts looking from the place marked by setwatch(). We return when the timeout is reached, or when we have found ALL of the regexes that were part of the watch ''' if timeout == None: timeout = self.Timeout save_regexes = self.regexes returnresult = [] if trace_lw: silent = False if not silent: self.debug("starting search: timeout=%d" % timeout) for regex in self.regexes: if self.debug_level > 2: self.debug("Looking for regex: "+regex) while (len(self.regexes) > 0): oneresult = self.look(timeout) if not oneresult: self.unmatched = self.regexes self.matched = returnresult self.regexes = save_regexes self.end() return None returnresult.append(oneresult) if not allow_multiple_matches: del self.regexes[self.whichmatch] else: # Allow multiple regexes to match a single line tmp_regexes = self.regexes self.regexes = [] which = 0 for regex in tmp_regexes: matchobj = re.search(regex, oneresult) if not matchobj: self.regexes.append(regex) self.unmatched = None self.matched = returnresult self.regexes = save_regexes return returnresult diff --git a/daemons/execd/Makefile.am b/daemons/execd/Makefile.am index 6fd10a4f66..661233d1c0 100644 --- a/daemons/execd/Makefile.am +++ b/daemons/execd/Makefile.am @@ -1,62 +1,67 @@ # # Copyright 2012-2020 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU Lesser General Public License # version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
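# Illustrative usage sketch for the LogWatcher class above; it is not part of
# the patch, and the log path, regex, and host names are examples only.
#
#   watcher = LogWatcher("/var/log/messages",
#                        [r"pacemaker-controld.*Transition.*complete"],
#                        name="example-watch", timeout=60,
#                        hosts=["node1", "node2"], kind="remote")
#   watcher.setwatch()               # mark the starting point in the logs
#   matches = watcher.lookforall()   # wait until every regex matches or timeout
#   if matches is None:
#       print("Timed out waiting for: %s" % watcher.unmatched)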
# include $(top_srcdir)/mk/common.mk halibdir = $(CRM_DAEMON_DIR) halib_PROGRAMS = pacemaker-execd cts-exec-helper initdir = $(INITDIR) init_SCRIPTS = pacemaker_remote sbin_PROGRAMS = pacemaker-remoted if BUILD_SYSTEMD systemdsystemunit_DATA = pacemaker_remote.service endif pacemaker_execd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_execd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_execd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/fencing/libstonithd.la pacemaker_execd_SOURCES = pacemaker-execd.c execd_commands.c \ execd_alerts.c pacemaker_remoted_CPPFLAGS = -DSUPPORT_REMOTE $(AM_CPPFLAGS) pacemaker_remoted_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_remoted_LDADD = $(pacemaker_execd_LDADD) \ $(top_builddir)/lib/lrmd/liblrmd.la pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \ remoted_tls.c remoted_pidone.c remoted_proxy.c cts_exec_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/lrmd/liblrmd.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/pengine/libpe_status.la cts_exec_helper_SOURCES = cts-exec-helper.c noinst_HEADERS = pacemaker-execd.h CLEANFILES = $(man8_MANS) -if BUILD_LEGACY_LINKS +# Always create a symlink for the old pacemaker_remoted name, so that bundle +# container images using a current Pacemaker will run on cluster nodes running +# Pacemaker 1 (>=1.1.17). install-exec-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd && $(LN_S) pacemaker-execd lrmd +endif cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted && $(LN_S) pacemaker-remoted pacemaker_remoted uninstall-hook: +if BUILD_LEGACY_LINKS cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd - cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted endif + cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted diff --git a/daemons/fenced/fence_legacy.in b/daemons/fenced/fence_legacy.in index 7324757e35..9f527f7e81 100755 --- a/daemons/fenced/fence_legacy.in +++ b/daemons/fenced/fence_legacy.in @@ -1,275 +1,272 @@ #!@PYTHON@ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2018 Andrew Beekhof " +__copyright__ = "Copyright 2018-2020 Andrew Beekhof " __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import os import sys import argparse import subprocess VERSION = "1.1.0" USAGE = """Helper that presents a Pacemaker-style interface for Linux-HA stonith plugins Should never be invoked by the user directly Usage: fence_legacy [options] Options: -h usage -t sub agent -n nodename -o Action: on | off | reset (default) | stat | hostlist -s stonith command -q quiet mode -V version""" META_DATA = """ This agent should never be invoked by the user directly. 
https://www.clusterlabs.org/ Fencing Action Physical plug number or name of virtual machine Display help and exit """ ACTIONS = [ "on", "off", "reset", "reboot", "stat", "status", "metadata", "monitor", "list", "hostlist", "poweroff", "poweron" ] # These values must be kept in sync with include/crm/crm.h class CrmExit(object): OK = 0 ERROR = 1 INVALID_PARAM = 2 def parse_cli_options(): """ Return parsed command-line options (as argparse namespace) """ # Don't add standard help option, so we can format it how we want parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-t", metavar="SUBAGENT", dest="subagent", nargs=1, default="none", help="sub-agent") parser.add_argument("-n", metavar="NODE", dest="node", nargs=1, default="", help="name of target node") # The help text here is consistent with the original version, though # perhaps all actions should be listed. parser.add_argument("-o", metavar="ACTION", dest="action", nargs=1, choices=ACTIONS, default="reset", help="action: on | off | reset (default) | stat | hostlist") parser.add_argument("-s", metavar="COMMAND", dest="command", nargs=1, default="stonith", help="stonith command") parser.add_argument("-q", dest="quiet", action="store_true", help="quiet mode") parser.add_argument("-h", "--help", action="store_true", help="show usage and exit") # Don't use action="version", because that printed to stderr before # Python 3.4, and help2man doesn't like that. parser.add_argument("-V", "--version", action="store_true", help="show version and exit") return parser.parse_args() def parse_stdin_options(options): """ Update options namespace with options parsed from stdin """ nlines = 0 for line in sys.stdin: # Remove leading and trailing whitespace line = line.strip() # Skip blank lines and comments if line == "" or line[0] == "#": continue nlines = nlines + 1 # Parse option name and value (allow whitespace around equals sign) try: (name, value) = line.split("=", 1) name = name.rstrip() if name == "": raise ValueError except ValueError: print("parse error: illegal name in option %d" % nlines, file=sys.stderr) sys.exit(CrmExit.INVALID_PARAM) value = value.lstrip() if name == "plugin": options.subagent = value elif name in [ "option", "action" ]: options.action = value elif name == "nodename": options.node = value os.environ[name] = value elif name == "stonith": options.command = value elif name != "agent": # agent is used by fenced os.environ[name] = value def normalize_options(options): """ Use string rather than list of one string """ if not hasattr(options.subagent, "strip"): options.subagent = options.subagent[0] if not hasattr(options.node, "strip"): options.node = options.node[0] if not hasattr(options.action, "strip"): options.action = options.action[0] if not hasattr(options.command, "strip"): options.command = options.command[0] def build_command(options): """ Return command to execute (as list of arguments) """ if options.action in [ "hostlist", "list" ]: extra_args = [ "-l" ] elif options.action in [ "monitor", "stat", "status" ]: extra_args = [ "-S" ] else: if options.node == "": if not options.quiet: print("failed: no plug number") sys.exit(CrmExit.ERROR) extra_args = [ "-T", options.action, options.node ] return [ options.command, "-t", options.subagent, "-E" ] + extra_args def handle_local_options(options): """ Handle options that don't require the fence agent """ if options.help: print(USAGE) sys.exit(CrmExit.OK) if options.version: print(VERSION) sys.exit(CrmExit.OK) def remap_action(options): """ Pre-process 
requested action """ options.action = options.action.lower() if options.action == "metadata": print(META_DATA) sys.exit(CrmExit.OK) elif options.action in [ "hostlist", "list" ]: options.quiet = True # Remap accepted aliases to their actual commands elif options.action == "reboot": options.action = "reset" elif options.action == "poweron": options.action = "on" elif options.action == "poweroff": options.action = "off" def execute_command(options, cmd): """ Execute command and return its exit status """ if not options.quiet: print("Performing: " + " ".join(cmd)) return subprocess.call(cmd) def handle_result(options, status): """ Process fence agent result """ if status == 0: message = "success" exitcode = CrmExit.OK else: message = "failed" exitcode = CrmExit.ERROR if not options.quiet: print("%s: %s %d" % (message, options.node, status)) sys.exit(exitcode) def main(): """ Execute an LHA-style fence agent """ options = parse_cli_options() handle_local_options(options) normalize_options(options) parse_stdin_options(options) remap_action(options) cmd = build_command(options) status = execute_command(options, cmd) handle_result(options, status) if __name__ == "__main__": main() diff --git a/doc/sphinx/Pacemaker_Development/python.rst b/doc/sphinx/Pacemaker_Development/python.rst index 8c28d44f8e..9a828771e3 100644 --- a/doc/sphinx/Pacemaker_Development/python.rst +++ b/doc/sphinx/Pacemaker_Development/python.rst @@ -1,153 +1,87 @@ .. index:: single: Python pair: Python; guidelines Python Coding Guidelines ------------------------ .. index:: pair: Python; boilerplate pair: license; Python pair: copyright; Python .. _s-python-boilerplate: Python Boilerplate ################## If a Python file is meant to be executed (as opposed to imported), it should have a ``.in`` extension, and its first line should be: .. code-block:: python #!@PYTHON@ which will be replaced with the appropriate python executable when Pacemaker is built. To make that happen, add an ``AC_CONFIG_FILES()`` line to ``configure.ac``, and add the file name without ``.in`` to ``.gitignore`` (see existing examples). After the above line if any, every Python file should start like this: .. code-block:: python """ """ - # Pacemaker targets compatibility with Python 2.7 and 3.2+ - from __future__ import print_function, unicode_literals, absolute_import, division - __copyright__ = "Copyright the Pacemaker project contributors" __license__ = " WITHOUT ANY WARRANTY" ** is obviously a brief description of the file's purpose. The string may contain any other information typically used in a Python file `docstring `_. -The ``import`` statement is discussed further in :ref:`s-python-future-imports`. - ```` should follow the policy set forth in the `COPYING `_ file, generally one of "GNU General Public License version 2 or later (GPLv2+)" or "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)". .. index:: - single: Python; 2 single: Python; 3 single: Python; version -Python Compatibility -#################### - -Pacemaker targets compatibility with Python 2.7, and Python 3.2 and -later. These versions have added features to be more compatible with each -other, allowing us to support both the 2 and 3 series with the same code. It is -a good idea to test any changes with both Python 2 and 3. - -.. 
_s-python-future-imports: - -Python Future Imports -_____________________ - -The future imports used in :ref:`s-python-boilerplate` mean: - -* All print statements must use parentheses, and printing without a newline - is accomplished with the ``end=' '`` parameter rather than a trailing comma. -* All string literals will be treated as Unicode (the ``u`` prefix is - unnecessary, and must not be used, because it is not available in Python 3.2). -* Local modules must be imported using ``from . import`` (rather than just - ``import``). To import one item from a local module, use - ``from .modulename import`` (rather than ``from modulename import``). -* Division using ``/`` will always return a floating-point result (use ``//`` - if you want the integer floor instead). - -Other Python Compatibility Requirements -_______________________________________ - -* When specifying an exception variable, always use ``as`` instead of a comma - (e.g. ``except Exception as e`` or ``except (TypeError, IOError) as e``). - Use ``e.args`` to access the error arguments (instead of iterating over or - subscripting ``e``). -* Use ``in`` (not ``has_key()``) to determine if a dictionary has a particular - key. -* Always use the I/O functions from the ``io`` module rather than the native - I/O functions (e.g. ``io.open()`` rather than ``open()``). -* When opening a file, always use the ``t`` (text) or ``b`` (binary) mode flag. -* When creating classes, always specify a parent class to ensure that it is a - "new-style" class (e.g. ``class Foo(object):`` rather than ``class Foo:``). -* Be aware of the bytes type added in Python 3. Many places where strings are - used in Python 2 use bytes or bytearrays in Python 3 (for example, the pipes - used with ``subprocess.Popen()``). Code should handle both possibilities. -* Be aware that the ``items()``, ``keys()``, and ``values()`` methods of - dictionaries return lists in Python 2 and views in Python 3. In many case, no - special handling is required, but if the code needs to use list methods on - the result, cast the result to list first. -* Do not raise or catch strings as exceptions (e.g. ``raise "Bad thing"``). -* Do not use the ``cmp`` parameter of sorting functions (use ``key`` instead, - if needed) or the ``__cmp__()`` method of classes (implement rich comparison - methods such as ``__lt__()`` instead, if needed). -* Do not use the ``buffer`` type. -* Do not use features not available in all targeted Python versions. Common - examples include: - - * The ``html``, ``ipaddress``, and ``UserDict`` modules - * The ``subprocess.run()`` function - * The ``subprocess.DEVNULL`` constant - * ``subprocess`` module-specific exceptions +Python Version Compatibility +############################ -Python Usages to Avoid -______________________ +Pacemaker targets compatibility with Python 3.2 and later. -Avoid the following if possible, otherwise research the compatibility issues -involved (hacky workarounds are often available): +Do not use features not available in all targeted Python versions. 
Some +examples include: -* long integers -* octal integer literals -* mixed binary and string data in one data file or variable -* metaclasses -* ``locale.strcoll`` and ``locale.strxfrm`` -* the ``configparser`` and ``ConfigParser`` modules -* importing compatibility modules such as ``six`` (so we don't have - to add them to Pacemaker's dependencies) + * The ``u`` prefix for string literals + * The ``ipaddress`` module + * The ``subprocess.run()`` function + * The ``subprocess.DEVNULL`` constant + * ``subprocess`` module-specific exceptions .. index:: pair: Python; whitespace Formatting Python Code ###################### * Indentation must be 4 spaces, no tabs. * Do not leave trailing whitespace. * Lines should be no longer than 80 characters unless limiting line length significantly impacts readability. For Python, this limitation is flexible since breaking a line often impacts readability, but definitely keep it under 120 characters. * Where not conflicting with this style guide, it is recommended (but not required) to follow `PEP 8 `_. * It is recommended (but not required) to format Python code such that ``pylint --disable=line-too-long,too-many-lines,too-many-instance-attributes,too-many-arguments,too-many-statements`` produces minimal complaints (even better if you don't need to disable all those checks). diff --git a/extra/clustermon.sh b/extra/clustermon.sh deleted file mode 100644 index 8451df3a81..0000000000 --- a/extra/clustermon.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -# -# Cluster Monitoring Script, for use with crm_mon and Pacemaker, -# to emulate the no-longer-supported --snmp-traps and --mail options. -# (However, alerts are the recommended means of doing this since -# Pacemaker 1.1.15.) -# -# Copyright 2013-2017 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . -# - -# Uncomment these lines to run a test of the script. -#CRM_notify_desc=OK -#CRM_notify_node=freepbx-a -#CRM_notify_rc=0 -#CRM_notify_recipient=emailalert@hipbx.org -#CRM_notify_rsc=spare_fs -#CRM_notify_status=0 -#CRM_notify_target_rc=0 -#CRM_notify_task=start - -# Set some defaults -FROMADDR=cluster@`hostname` - -# Check to see if we should do anything with this alert. -# You could put some smarts in this, a database lookup, etc -# This is just a simple setup that tries to guess what you -# want to do based on the notify_recipient option to crm_mon - -# Basic examples: -# pcs resource create ClusterMon-Email --clone ClusterMon user=root extra_options="-E /usr/local/bin/clustermon-new.sh -e emailalert@hipbx.org" -# pcs resource create ClusterMon-SNMP --clone ClusterMon user=root extra_options="-E /usr/local/bin/clustermon-new.sh -e 192.168.254.254" - -function getDest { - # This looks for an '@' in the notify_recipient. If it finds it, it'll send an email. - # Otherwise, it'll send a SNMP trap. - if [ ! 
-z "${CRM_notify_recipient}" ] - then - if [[ "${CRM_notify_recipient}" == *@* ]] - then - SMTPDEST=${CRM_notify_recipient} - DEST='smtp' - else - SNMPDEST=${CRM_notify_recipient} - DEST='snmp' - fi - else - # Hard coded defaults. Please change these. - SMTPDEST='ididntchangethedefaults@hipbx.org' - SNMPDEST='192.168.254.254' - SNMPCOMMUNITY='public' - # If 'DEST' is blank, nothing happens. - DEST='both' - fi -} - -# Other Predefined functions - -function sendsmtp { - # Re-implemtation of code in crm_mon.c - [ -z "${SMTPDEST}" ] && return - - [ -z "${CRM_notify_node}" ] && CRM_notify_node="-" - node=${CRM_notify_node} - [ -z "${CRM_notify_rsc}" ] && CRM_notify_rsc="-" - rsc=${CRM_notify_rsc} - [ -z "${CRM_notify_desc}" ] && CRM_notify_desc="-" - desc=${CRM_notify_desc} - - crm_mail_prefix="Cluster notification" - - subject="${crm_mail_prefix} - ${CRM_notify_task} event for ${rsc} on ${node}" - body="\n${crm_mail_pref}\n====\n\n" - if [ ${CRM_notify_target_rc} -eq ${CRM_notify_rc} ] - then - body="${body}Completed operation ${CRM_notify_task} for resource ${rsc} on ${node}\n" - else - body="${body}Operation ${CRM_notify_task} for resource ${rsc} on ${node} failed: ${desc}\n" - fi - statusstr=$(ocf_status ${CRM_notify_status}) - body="${body}\nDetails:\n\toperation status: (${CRM_notify_status}) ${statusstr}\n" - if [ "${CRM_notify_status}" -eq 0 ] - then - result=$(ocf_exitcode ${CRM_notify_rc}) - target=$(ocf_exitcode ${CRM_notify_target_rc}) - body="${body}\tscript returned: (${CRM_notify_rc}) ${result}\n" - body="${body}\texpected return value: (${CRM_notify_target_rc}) ${target}\n" - fi - - echo -e $body | mail -r "$FROMADDR" -s "$subject" "$SMTPDEST" -} - -function ocf_status { - case $1 in - -1) echo "pending" ;; - 0) echo "complete" ;; - 1) echo "Cancelled" ;; - 2) echo "Timed Out" ;; - 3) echo "NOT SUPPORTED" ;; - 4) echo "Error" ;; - 5) echo "Not installed" ;; - *) echo "Exceptionally unusual" ;; - esac -} - -function ocf_exitcode { - case $1 in - 0) echo "OK" ;; - 1) echo "Unknown Error" ;; - 2) echo "Invalid Parameter" ;; - 3) echo "Unimplemented Feature" ;; - 4) echo "Insufficient Privileges" ;; - 5) echo "not installed" ;; - 6) echo "not configured" ;; - 7) echo "not running" ;; - 8) echo "master" ;; - 9) echo "master (failed)" ;; - 192) echo "OCF_EXEC_ERROR" ;; - 193) echo "OCF_UNKNOWN" ;; - 194) echo "OCF_SIGNAL" ;; - 195) echo "OCF_NOT_SUPPORTED" ;; - 196) echo "OCF_PENDING" ;; - 197) echo "OCF_CANCELLED" ;; - 198) echo "OCF_TIMEOUT" ;; - 199) echo "OCF_OTHER_ERROR" ;; - *) echo "Exceptionally unknown error" ;; - esac -} - - -function sendsnmp() { - [ -f /usr/bin/snmptrap ] && /usr/bin/snmptrap -v 2c -c "$SNMPCOMMUNITY" "$SNMPDEST" "" PACEMAKER-MIB::pacemakerNotification \ - PACEMAKER-MIB::pacemakerNotificationNode s "${CRM_notify_node}" \ - PACEMAKER-MIB::pacemakerNotificationResource s "${CRM_notify_rsc}" \ - PACEMAKER-MIB::pacemakerNotificationOperation s "${CRM_notify_task}" \ - PACEMAKER-MIB::pacemakerNotificationDescription s "${CRM_notify_desc}" \ - PACEMAKER-MIB::pacemakerNotificationStatus i "${CRM_notify_status}" \ - PACEMAKER-MIB::pacemakerNotificationReturnCode i ${CRM_notify_rc} \ - PACEMAKER-MIB::pacemakerNotificationTargetReturnCode i ${CRM_notify_target_rc} -} - -# Lets see who wants to do what with this. -getDest - -# Do we want to do anything with this alert? 
-if [ -z "$DEST" ] -then - exit 0; -fi - -if [ "$DEST" == "both" -o "$DEST" == "smtp" ] -then - sendsmtp; -fi - -if [ "$DEST" == "both" -o "$DEST" == "snmp" ] -then - sendsnmp; -fi diff --git a/extra/resources/Makefile.am b/extra/resources/Makefile.am index 24afd0e97d..69cd8bd728 100644 --- a/extra/resources/Makefile.am +++ b/extra/resources/Makefile.am @@ -1,40 +1,39 @@ # # Copyright 2008-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/mk/common.mk ocfdir = @OCF_RA_DIR@/pacemaker dist_ocf_SCRIPTS = attribute \ controld \ Dummy \ HealthCPU \ HealthIOWait \ ping \ - pingd \ remote \ Stateful \ SystemHealth ocf_SCRIPTS = ClusterMon \ HealthSMART \ ifspeed \ o2cb \ SysInfo if BUILD_XML_HELP man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7) $(dist_ocf_SCRIPTS:%=ocf_pacemaker_%.7) DBOOK_OPTS = --stringparam command.prefix ocf_pacemaker_ --stringparam variable.prefix OCF_RESKEY_ --param man.vol 7 ocf_pacemaker_%.xml: % $(AM_V_GEN)OCF_FUNCTIONS=/dev/null OCF_ROOT=$(OCF_ROOT_DIR) $(abs_builddir)/$< meta-data > $@ endif clean-generic: rm -f $(man7_MANS) $(ocf_SCRIPTS:%=%.xml) $(dist_ocf_SCRIPTS:%=%.xml) *~ diff --git a/extra/resources/pingd b/extra/resources/pingd deleted file mode 100755 index 6e50e906b4..0000000000 --- a/extra/resources/pingd +++ /dev/null @@ -1,185 +0,0 @@ -#!/bin/sh -# -# ocf:pacemaker:pingd resource agent -# -# Copyright 2006-2019 the Pacemaker project contributors -# -# The version control history for this file may have further details. -# -# This source code is licensed under the GNU General Public License version 2 -# (GPLv2) WITHOUT ANY WARRANTY. -# - -# -# Records (in the CIB) the current number of ping nodes a cluster node can -# connect to. -# -####################################################################### -# Initialization: - -: ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"} -. "${OCF_FUNCTIONS}" -: ${__OCF_ACTION:="$1"} - -: ${OCF_RESKEY_name:="pingd"} -: ${OCF_RESKEY_interval:="1"} -: ${OCF_RESKEY_CRM_meta_interval:=0} - -ocf_log err "This agent (ocf:pacemaker:pingd) is deprecated, does not work, and" -ocf_log err "will be removed in a future release (use ocf:pacemaker:ping instead)" - -case "$__OCF_ACTION" in - start|monitor) - # BUG: The sense of this test is reversed. Rather than fix it now, we - # will formally deprecate the agent and remove it in a future release. 
- if [ -n "$OCF_RESKEY_host_list" ]; then - ocf_log err "Automatic conversion to ocf:pacemaker:ping failed: no hosts were configured to check for connectivity" - exit $OCF_ERR_ARGS - fi - - recurring=$(crm configure show "$OCF_RESOURCE_INSTANCE" | grep "op monitor.*interval=\"[1-9]" | sed s/.*interval=// | awk -F\" '{print $2}' | sed s/.*interval=// | awk -F\" '{print $2}' | sort | head -n 1) - - if [ -z "$recurring" ]; then - ocf_log err "Automatic conversion to ocf:pacemaker:ping failed: no monitor operation configured" - ocf_log err "Without an explicit monitor operation for '$OCF_RESOURCE_INSTANCE', connectivity changes will not be noticed" - ocf_log err "Preventing startup to ensure the issue is addressed before it matters" - exit $OCF_ERR_ARGS - fi - - if [ $OCF_RESKEY_CRM_meta_interval -eq 0 ]; then - if [ $recurring -ne $OCF_RESKEY_interval ]; then - ocf_log warn "Your monitor operation happens every $recurring, which means that the $OCF_RESKEY_name attribute will be updated with a different frequency than the previously configured ( $OCF_RESKEY_interval )" - ocf_log warn "Either change the monitor interval to match or, ideally, switch to the ocf:pacemaker:ping agent and avoid all this compatibility nonsense." - fi - fi - ;; - meta-data) - cat < - - -1.0 - -This agent (ocf:pacemaker:pingd) is deprecated and broken, and has been -replaced by the more reliable ocf:pacemaker:ping. It records (in the CIB) -the current number of ping nodes (specified in the 'host_list' parameter) -a cluster node can connect to. - -pingd resource agent - - - - -PID file -PID file - - - - - - -The user we want to run pingd as - -The user we want to run pingd as - - - - - -The time to wait (dampening) further changes occur - -Dampening interval - - - - - -The name of the instance_attributes set to place the value in. Rarely needs to be specified. - -Set name - - - - - -The name of the attributes to set. This is the name to be used in the constraints. - -Attribute name - - - - - -The section place the value in. Rarely needs to be specified. - -Section name - - - - - -The number by which to multiply the number of connected ping nodes by - -Value multiplier - - - - - -The list of ping nodes to count. Defaults to all configured ping nodes. Rarely needs to be specified. - -Host list - - - - - -How often, in seconds, to check for node liveliness - -ping interval in seconds - - - - - -Number of ping attempts, per host, before declaring it dead - -no. of ping attempts - - - - - -How long, in seconds, to wait before declaring a ping lost - -ping timeout in seconds - - - - - -A catch all for any other options that need to be passed to pingd. - -Extra Options - - - - - - - - - - - - - -END - exit $OCF_SUCCESS - ;; -esac - -"${OCF_ROOT}/resource.d/pacemaker/ping" "$1" -exit $? - -# vim: set filetype=sh expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=80: diff --git a/include/portability.h b/include/portability.h index a6eff73cd4..2750120d43 100644 --- a/include/portability.h +++ b/include/portability.h @@ -1,186 +1,168 @@ /* * Copyright 2001-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PORTABILITY_H # define PORTABILITY_H # define EOS '\0' # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) /* Needs to be defined before any other includes, otherwise some system * headers do not behave as expected! 
Major black magic... */ # undef _GNU_SOURCE /* in case it was defined on the command line */ # define _GNU_SOURCE /* Please leave this as the first #include - Solaris needs it there */ # ifdef HAVE_CONFIG_H # ifndef PCMK__CONFIG_H # define PCMK__CONFIG_H # include # endif # endif /* Prototypes for libreplace functions */ # ifndef HAVE_DAEMON /* We supply a replacement function, but need a prototype */ int daemon(int nochdir, int noclose); # endif /* HAVE_DAEMON */ # ifndef HAVE_SETENV /* We supply a replacement function, but need a prototype */ int setenv(const char *name, const char *value, int why); # endif /* HAVE_SETENV */ # ifndef HAVE_STRERROR /* We supply a replacement function, but need a prototype */ char *strerror(int errnum); # endif /* HAVE_STRERROR */ # ifndef HAVE_STRCHRNUL /* We supply a replacement function, but need a prototype */ char *strchrnul(const char *s, int c_in); # endif /* HAVE_STRCHRNUL */ # ifndef HAVE_ALPHASORT # include int alphasort(const void *dirent1, const void *dirent2); # endif /* HAVE_ALPHASORT */ # ifndef HAVE_STRNLEN size_t strnlen(const char *s, size_t maxlen); # else # define USE_GNU # endif # ifndef HAVE_STRNDUP char *strndup(const char *str, size_t len); # else # define USE_GNU # endif -// This test could be better, but it covers platforms of interest -# if defined(ON_BSD) || defined(ON_SOLARIS) -# define SUPPORT_PROCFS 0 -# else -# define SUPPORT_PROCFS 1 -# endif - # include -# if !GLIB_CHECK_VERSION(2,28,0) -# include -/* Since: 2.28 */ -static inline void -g_list_free_full(GList * list, GDestroyNotify free_func) -{ - g_list_foreach(list, (GFunc) free_func, NULL); - g_list_free(list); -} -# endif - # if !GLIB_CHECK_VERSION(2,38,0) # define g_assert_true(expr) g_assert_cmpint((expr), ==, TRUE) # define g_assert_false(expr) g_assert_cmpint((expr), !=, TRUE) # define g_assert_null(expr) g_assert_cmpint((expr) == NULL, ==, TRUE) # endif # if SUPPORT_DBUS # ifndef HAVE_DBUSBASICVALUE # include # include /** * An 8-byte struct you could use to access int64 without having * int64 support */ typedef struct { uint32_t first32; /**< first 32 bits in the 8 bytes (beware endian issues) */ uint32_t second32; /**< second 32 bits in the 8 bytes (beware endian issues) */ } DBus8ByteStruct; /** * A simple value union that lets you access bytes as if they * were various types; useful when dealing with basic types via * void pointers and varargs. * * This union also contains a pointer member (which can be used * to retrieve a string from dbus_message_iter_get_basic(), for * instance), so on future platforms it could conceivably be larger * than 8 bytes. 
*/ typedef union { unsigned char bytes[8]; /**< as 8 individual bytes */ int16_t i16; /**< as int16 */ uint16_t u16; /**< as int16 */ int32_t i32; /**< as int32 */ uint32_t u32; /**< as int32 */ uint32_t bool_val; /**< as boolean */ # ifdef DBUS_HAVE_INT64 int64_t i64; /**< as int64 */ uint64_t u64; /**< as int64 */ # endif DBus8ByteStruct eight; /**< as 8-byte struct */ double dbl; /**< as double */ unsigned char byt; /**< as byte */ char *str; /**< as char* (string, object path or signature) */ int fd; /**< as Unix file descriptor */ } DBusBasicValue; # endif # endif /* Replacement error codes for non-linux */ # include # ifndef ENOTUNIQ # define ENOTUNIQ 190 # endif # ifndef ECOMM # define ECOMM 191 # endif # ifndef ELIBACC # define ELIBACC 192 # endif # ifndef EREMOTEIO # define EREMOTEIO 193 # endif # ifndef EUNATCH # define EUNATCH 194 # endif # ifndef ENOKEY # define ENOKEY 195 # endif # ifndef ENODATA # define ENODATA 196 # endif # ifndef ETIME # define ETIME 197 # endif # ifndef ENOSR # define ENOSR 198 # endif # ifndef ENOSTR # define ENOSTR 199 # endif # ifndef EKEYREJECTED # define EKEYREJECTED 200 # endif #endif /* PORTABILITY_H */ diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c index 4de0a0f7b5..7686637dbf 100644 --- a/lib/cib/cib_remote.c +++ b/lib/cib/cib_remote.c @@ -1,636 +1,633 @@ /* * Copyright 2008-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include gnutls_anon_client_credentials_t anon_cred_c; # define DEFAULT_CLIENT_HANDSHAKE_TIMEOUT 5000 /* 5 seconds */ const int kx_prio[] = { GNUTLS_KX_ANON_DH, 0 }; static gboolean remote_gnutls_credentials_init = FALSE; #else typedef void gnutls_session_t; #endif #include -#ifndef ON_BSD -# include -#endif #define DH_BITS 1024 typedef struct cib_remote_opaque_s { int flags; int socket; int port; char *server; char *user; char *passwd; gboolean encrypted; pcmk__remote_t command; pcmk__remote_t callback; } cib_remote_opaque_t; void cib_remote_connection_destroy(gpointer user_data); int cib_remote_callback_dispatch(gpointer user_data); int cib_remote_command_dispatch(gpointer user_data); int cib_remote_signon(cib_t * cib, const char *name, enum cib_conn_type type); int cib_remote_signoff(cib_t * cib); int cib_remote_free(cib_t * cib); int cib_remote_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *name); static int cib_remote_inputfd(cib_t * cib) { cib_remote_opaque_t *private = cib->variant_opaque; return private->callback.tcp_socket; } static int cib_remote_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data)) { return -EPROTONOSUPPORT; } static int cib_remote_register_notification(cib_t * cib, const char *callback, int enabled) { xmlNode *notify_msg = create_xml_node(NULL, "cib_command"); cib_remote_opaque_t *private = cib->variant_opaque; crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY); crm_xml_add(notify_msg, F_CIB_NOTIFY_TYPE, callback); crm_xml_add_int(notify_msg, F_CIB_NOTIFY_ACTIVATE, enabled); pcmk__remote_send_xml(&private->callback, notify_msg); free_xml(notify_msg); return pcmk_ok; } cib_t * 
cib_remote_new(const char *server, const char *user, const char *passwd, int port, gboolean encrypted) { cib_remote_opaque_t *private = NULL; cib_t *cib = cib_new_variant(); private = calloc(1, sizeof(cib_remote_opaque_t)); cib->variant = cib_remote; cib->variant_opaque = private; if (server) { private->server = strdup(server); } if (user) { private->user = strdup(user); } if (passwd) { private->passwd = strdup(passwd); } private->port = port; private->encrypted = encrypted; /* assign variant specific ops */ cib->delegate_fn = cib_remote_perform_op; cib->cmds->signon = cib_remote_signon; cib->cmds->signoff = cib_remote_signoff; cib->cmds->free = cib_remote_free; cib->cmds->inputfd = cib_remote_inputfd; cib->cmds->register_notification = cib_remote_register_notification; cib->cmds->set_connection_dnotify = cib_remote_set_connection_dnotify; return cib; } static int cib_tls_close(cib_t * cib) { cib_remote_opaque_t *private = cib->variant_opaque; #ifdef HAVE_GNUTLS_GNUTLS_H if (private->encrypted) { if (private->command.tls_session) { gnutls_bye(*(private->command.tls_session), GNUTLS_SHUT_RDWR); gnutls_deinit(*(private->command.tls_session)); gnutls_free(private->command.tls_session); } if (private->callback.tls_session) { gnutls_bye(*(private->callback.tls_session), GNUTLS_SHUT_RDWR); gnutls_deinit(*(private->callback.tls_session)); gnutls_free(private->callback.tls_session); } private->command.tls_session = NULL; private->callback.tls_session = NULL; if (remote_gnutls_credentials_init) { gnutls_anon_free_client_credentials(anon_cred_c); gnutls_global_deinit(); remote_gnutls_credentials_init = FALSE; } } #endif if (private->command.tcp_socket) { shutdown(private->command.tcp_socket, SHUT_RDWR); /* no more receptions */ close(private->command.tcp_socket); } if (private->callback.tcp_socket) { shutdown(private->callback.tcp_socket, SHUT_RDWR); /* no more receptions */ close(private->callback.tcp_socket); } private->command.tcp_socket = 0; private->callback.tcp_socket = 0; free(private->command.buffer); free(private->callback.buffer); private->command.buffer = NULL; private->callback.buffer = NULL; return 0; } static inline int cib__tls_client_handshake(pcmk__remote_t *remote) { #ifdef HAVE_GNUTLS_GNUTLS_H return pcmk__tls_client_handshake(remote, DEFAULT_CLIENT_HANDSHAKE_TIMEOUT); #else return 0; #endif } static int cib_tls_signon(cib_t *cib, pcmk__remote_t *connection, gboolean event_channel) { cib_remote_opaque_t *private = cib->variant_opaque; int rc; xmlNode *answer = NULL; xmlNode *login = NULL; static struct mainloop_fd_callbacks cib_fd_callbacks = { 0, }; cib_fd_callbacks.dispatch = event_channel ? 
cib_remote_callback_dispatch : cib_remote_command_dispatch; cib_fd_callbacks.destroy = cib_remote_connection_destroy; connection->tcp_socket = -1; #ifdef HAVE_GNUTLS_GNUTLS_H connection->tls_session = NULL; #endif rc = pcmk__connect_remote(private->server, private->port, 0, NULL, &(connection->tcp_socket), NULL, NULL); if (rc != pcmk_rc_ok) { crm_info("Remote connection to %s:%d failed: %s " CRM_XS " rc=%d", private->server, private->port, pcmk_rc_str(rc), rc); return -ENOTCONN; } if (private->encrypted) { /* initialize GnuTls lib */ #ifdef HAVE_GNUTLS_GNUTLS_H if (remote_gnutls_credentials_init == FALSE) { crm_gnutls_global_init(); gnutls_anon_allocate_client_credentials(&anon_cred_c); remote_gnutls_credentials_init = TRUE; } /* bind the socket to GnuTls lib */ connection->tls_session = pcmk__new_tls_session(connection->tcp_socket, GNUTLS_CLIENT, GNUTLS_CRD_ANON, anon_cred_c); if (connection->tls_session == NULL) { cib_tls_close(cib); return -1; } if (cib__tls_client_handshake(connection) != pcmk_rc_ok) { crm_err("Session creation for %s:%d failed", private->server, private->port); gnutls_deinit(*connection->tls_session); gnutls_free(connection->tls_session); connection->tls_session = NULL; cib_tls_close(cib); return -1; } #else return -EPROTONOSUPPORT; #endif } /* login to server */ login = create_xml_node(NULL, "cib_command"); crm_xml_add(login, "op", "authenticate"); crm_xml_add(login, "user", private->user); crm_xml_add(login, "password", private->passwd); crm_xml_add(login, "hidden", "password"); pcmk__remote_send_xml(connection, login); free_xml(login); rc = pcmk_ok; if (pcmk__read_remote_message(connection, -1) == ENOTCONN) { rc = -ENOTCONN; } answer = pcmk__remote_message_xml(connection); crm_log_xml_trace(answer, "Reply"); if (answer == NULL) { rc = -EPROTO; } else { /* grab the token */ const char *msg_type = crm_element_value(answer, F_CIB_OPERATION); const char *tmp_ticket = crm_element_value(answer, F_CIB_CLIENTID); if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) { crm_err("Invalid registration message: %s", msg_type); rc = -EPROTO; } else if (tmp_ticket == NULL) { rc = -EPROTO; } else { connection->token = strdup(tmp_ticket); } } free_xml(answer); answer = NULL; if (rc != 0) { cib_tls_close(cib); return rc; } crm_trace("remote client connection established"); connection->source = mainloop_add_fd("cib-remote", G_PRIORITY_HIGH, connection->tcp_socket, cib, &cib_fd_callbacks); return rc; } void cib_remote_connection_destroy(gpointer user_data) { crm_err("Connection destroyed"); #ifdef HAVE_GNUTLS_GNUTLS_H cib_tls_close(user_data); #endif return; } int cib_remote_command_dispatch(gpointer user_data) { int rc; cib_t *cib = user_data; cib_remote_opaque_t *private = cib->variant_opaque; rc = pcmk__read_remote_message(&private->command, -1); free(private->command.buffer); private->command.buffer = NULL; crm_err("received late reply for remote cib connection, discarding"); if (rc == ENOTCONN) { return -1; } return 0; } int cib_remote_callback_dispatch(gpointer user_data) { int rc; cib_t *cib = user_data; cib_remote_opaque_t *private = cib->variant_opaque; xmlNode *msg = NULL; crm_info("Message on callback channel"); rc = pcmk__read_remote_message(&private->callback, -1); msg = pcmk__remote_message_xml(&private->callback); while (msg) { const char *type = crm_element_value(msg, F_TYPE); crm_trace("Activating %s callbacks...", type); if (pcmk__str_eq(type, T_CIB, pcmk__str_casei)) { cib_native_callback(cib, msg, 0, 0); } else if (pcmk__str_eq(type, T_CIB_NOTIFY, 
pcmk__str_casei)) { g_list_foreach(cib->notify_list, cib_native_notify, msg); } else { crm_err("Unknown message type: %s", type); } free_xml(msg); msg = pcmk__remote_message_xml(&private->callback); } if (rc == ENOTCONN) { return -1; } return 0; } int cib_remote_signon(cib_t * cib, const char *name, enum cib_conn_type type) { int rc = pcmk_ok; cib_remote_opaque_t *private = cib->variant_opaque; if (private->passwd == NULL) { struct termios settings; rc = tcgetattr(0, &settings); if(rc == 0) { settings.c_lflag &= ~ECHO; rc = tcsetattr(0, TCSANOW, &settings); } if(rc == 0) { fprintf(stderr, "Password: "); private->passwd = calloc(1, 1024); rc = scanf("%1023s", private->passwd); fprintf(stderr, "\n"); } if (rc < 1) { private->passwd = NULL; } settings.c_lflag |= ECHO; rc = tcsetattr(0, TCSANOW, &settings); } if (private->server == NULL || private->user == NULL) { rc = -EINVAL; } if (rc == pcmk_ok) { rc = cib_tls_signon(cib, &(private->command), FALSE); } if (rc == pcmk_ok) { rc = cib_tls_signon(cib, &(private->callback), TRUE); } if (rc == pcmk_ok) { xmlNode *hello = cib_create_op(0, private->callback.token, CRM_OP_REGISTER, NULL, NULL, NULL, 0, NULL); crm_xml_add(hello, F_CIB_CLIENTNAME, name); pcmk__remote_send_xml(&private->command, hello); free_xml(hello); } if (rc == pcmk_ok) { crm_info("Opened connection to %s:%d for %s", private->server, private->port, name); cib->state = cib_connected_command; cib->type = cib_command; } else { crm_info("Connection to %s:%d for %s failed: %s\n", private->server, private->port, name, pcmk_strerror(rc)); } return rc; } int cib_remote_signoff(cib_t * cib) { int rc = pcmk_ok; /* cib_remote_opaque_t *private = cib->variant_opaque; */ crm_debug("Disconnecting from the CIB manager"); #ifdef HAVE_GNUTLS_GNUTLS_H cib_tls_close(cib); #endif cib->state = cib_disconnected; cib->type = cib_no_connection; return rc; } int cib_remote_free(cib_t * cib) { int rc = pcmk_ok; crm_warn("Freeing CIB"); if (cib->state != cib_disconnected) { rc = cib_remote_signoff(cib); if (rc == pcmk_ok) { cib_remote_opaque_t *private = cib->variant_opaque; free(private->server); free(private->user); free(private->passwd); free(cib->cmds); free(private); free(cib); } } return rc; } int cib_remote_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *name) { int rc; int remaining_time = 0; time_t start_time; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; cib_remote_opaque_t *private = cib->variant_opaque; if (cib->state == cib_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } cib->call_id++; if (cib->call_id < 1) { cib->call_id = 1; } op_msg = cib_create_op(cib->call_id, private->callback.token, op, host, section, data, call_options, NULL); if (op_msg == NULL) { return -EPROTO; } crm_trace("Sending %s message to the CIB manager", op); if (!(call_options & cib_sync_call)) { pcmk__remote_send_xml(&private->callback, op_msg); } else { pcmk__remote_send_xml(&private->command, op_msg); } free_xml(op_msg); if ((call_options & cib_discard_reply)) { crm_trace("Discarding reply"); return pcmk_ok; } else if (!(call_options & cib_sync_call)) { return cib->call_id; } crm_trace("Waiting for a synchronous reply"); start_time = time(NULL); remaining_time = cib->call_timeout ? 
cib->call_timeout : 60; rc = pcmk_rc_ok; while (remaining_time > 0 && (rc != ENOTCONN)) { int reply_id = -1; int msg_id = cib->call_id; rc = pcmk__read_remote_message(&private->command, remaining_time * 1000); op_reply = pcmk__remote_message_xml(&private->command); if (!op_reply) { break; } crm_element_value_int(op_reply, F_CIB_CALLID, &reply_id); if (reply_id == msg_id) { break; } else if (reply_id < msg_id) { crm_debug("Received old reply: %d (wanted %d)", reply_id, msg_id); crm_log_xml_trace(op_reply, "Old reply"); } else if ((reply_id - 10000) > msg_id) { /* wrap-around case */ crm_debug("Received old reply: %d (wanted %d)", reply_id, msg_id); crm_log_xml_trace(op_reply, "Old reply"); } else { crm_err("Received a __future__ reply:" " %d (wanted %d)", reply_id, msg_id); } free_xml(op_reply); op_reply = NULL; /* wasn't the right reply, try and read some more */ remaining_time = time(NULL) - start_time; } /* if(IPC_ISRCONN(native->command_channel) == FALSE) { */ /* crm_err("The CIB manager disconnected: %d", */ /* native->command_channel->ch_status); */ /* cib->state = cib_disconnected; */ /* } */ if (rc == ENOTCONN) { crm_err("Disconnected while waiting for reply."); return -ENOTCONN; } else if (op_reply == NULL) { crm_err("No reply message - empty"); return -ENOMSG; } crm_trace("Synchronous reply received"); /* Start processing the reply... */ if (crm_element_value_int(op_reply, F_CIB_RC, &rc) != 0) { rc = -EPROTO; } if (rc == -pcmk_err_diff_resync) { /* This is an internal value that clients do not and should not care about */ rc = pcmk_ok; } if (rc == pcmk_ok || rc == -EPERM) { crm_log_xml_debug(op_reply, "passed"); } else { /* } else if(rc == -ETIME) { */ crm_err("Call failed: %s", pcmk_strerror(rc)); crm_log_xml_warn(op_reply, "failed"); } if (output_data == NULL) { /* do nothing more */ } else if (!(call_options & cib_discard_reply)) { xmlNode *tmp = get_message_xml(op_reply, F_CIB_CALLDATA); if (tmp == NULL) { crm_trace("No output in reply to \"%s\" command %d", op, cib->call_id - 1); } else { *output_data = copy_xml(tmp); } } free_xml(op_reply); return rc; } diff --git a/lib/common/acl.c b/lib/common/acl.c index 8fd915a7f0..f80a204a76 100644 --- a/lib/common/acl.c +++ b/lib/common/acl.c @@ -1,808 +1,794 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "crmcommon_private.h" #define MAX_XPATH_LEN 4096 typedef struct xml_acl_s { enum xml_private_flags mode; char *xpath; } xml_acl_t; static void free_acl(void *data) { if (data) { xml_acl_t *acl = data; free(acl->xpath); free(acl); } } void pcmk__free_acls(GList *acls) { g_list_free_full(acls, free_acl); } static GList * create_acl(xmlNode *xml, GList *acls, enum xml_private_flags mode) { xml_acl_t *acl = NULL; const char *tag = crm_element_value(xml, XML_ACL_ATTR_TAG); const char *ref = crm_element_value(xml, XML_ACL_ATTR_REF); const char *xpath = crm_element_value(xml, XML_ACL_ATTR_XPATH); const char *attr = crm_element_value(xml, XML_ACL_ATTR_ATTRIBUTE); if (tag == NULL) { // @COMPAT rolling upgrades <=1.1.11 tag = crm_element_value(xml, XML_ACL_ATTR_TAGv1); } if (ref == NULL) { // @COMPAT rolling upgrades <=1.1.11 ref = crm_element_value(xml, XML_ACL_ATTR_REFv1); } if ((tag == NULL) && (ref == NULL) && (xpath == NULL)) { // Schema should prevent this, but to be safe ... crm_trace("Ignoring ACL <%s> element without selection criteria", crm_element_name(xml)); return NULL; } acl = calloc(1, sizeof (xml_acl_t)); CRM_ASSERT(acl != NULL); acl->mode = mode; if (xpath) { acl->xpath = strdup(xpath); CRM_ASSERT(acl->xpath != NULL); crm_trace("Unpacked ACL <%s> element using xpath: %s", crm_element_name(xml), acl->xpath); } else { int offset = 0; char buffer[MAX_XPATH_LEN]; if (tag) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "//%s", tag); } else { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "//*"); } if (ref || attr) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "["); } if (ref) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "@id='%s'", ref); } // NOTE: schema currently does not allow this if (ref && attr) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, " and "); } if (attr) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "@%s", attr); } if (ref || attr) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "]"); } CRM_LOG_ASSERT(offset > 0); acl->xpath = strdup(buffer); CRM_ASSERT(acl->xpath != NULL); crm_trace("Unpacked ACL <%s> element as xpath: %s", crm_element_name(xml), acl->xpath); } return g_list_append(acls, acl); } /*! 
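 * For reference, the XPath strings that create_acl() above generates when no
 * explicit xpath attribute is present look like this (the tag, reference, and
 * attribute values here are examples only):
 *   tag="primitive", ref="rsc1"   -> //primitive[@id='rsc1']
 *   tag=NULL,        ref="rsc1"   -> //*[@id='rsc1']
 *   tag="nvpair",    attr="value" -> //nvpair[@value]
 *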
* \internal * \brief Unpack a user, group, or role subtree of the ACLs section * * \param[in] acl_top XML of entire ACLs section * \param[in] acl_entry XML of ACL element being unpacked * \param[in,out] acls List of ACLs unpacked so far * * \return New head of (possibly modified) acls */ static GList * parse_acl_entry(xmlNode *acl_top, xmlNode *acl_entry, GList *acls) { xmlNode *child = NULL; for (child = pcmk__xe_first_child(acl_entry); child; child = pcmk__xe_next(child)) { const char *tag = crm_element_name(child); const char *kind = crm_element_value(child, XML_ACL_ATTR_KIND); if (strcmp(XML_ACL_TAG_PERMISSION, tag) == 0){ CRM_ASSERT(kind != NULL); crm_trace("Unpacking ACL <%s> element of kind '%s'", tag, kind); tag = kind; } else { crm_trace("Unpacking ACL <%s> element", tag); } if (strcmp(XML_ACL_TAG_ROLE_REF, tag) == 0 || strcmp(XML_ACL_TAG_ROLE_REFv1, tag) == 0) { const char *ref_role = crm_element_value(child, XML_ATTR_ID); if (ref_role) { xmlNode *role = NULL; for (role = pcmk__xe_first_child(acl_top); role; role = pcmk__xe_next(role)) { if (!strcmp(XML_ACL_TAG_ROLE, (const char *) role->name)) { const char *role_id = crm_element_value(role, XML_ATTR_ID); if (role_id && strcmp(ref_role, role_id) == 0) { crm_trace("Unpacking referenced role '%s' in ACL <%s> element", role_id, crm_element_name(acl_entry)); acls = parse_acl_entry(acl_top, role, acls); break; } } } } } else if (strcmp(XML_ACL_TAG_READ, tag) == 0) { acls = create_acl(child, acls, xpf_acl_read); } else if (strcmp(XML_ACL_TAG_WRITE, tag) == 0) { acls = create_acl(child, acls, xpf_acl_write); } else if (strcmp(XML_ACL_TAG_DENY, tag) == 0) { acls = create_acl(child, acls, xpf_acl_deny); } else { crm_warn("Ignoring unknown ACL %s '%s'", (kind? "kind" : "element"), tag); } } return acls; } /* */ static const char * acl_to_text(enum xml_private_flags flags) { if (pcmk_is_set(flags, xpf_acl_deny)) { return "deny"; } else if (pcmk_any_flags_set(flags, xpf_acl_write|xpf_acl_create)) { return "read/write"; } else if (pcmk_is_set(flags, xpf_acl_read)) { return "read"; } return "none"; } void pcmk__apply_acl(xmlNode *xml) { GListPtr aIter = NULL; xml_private_t *p = xml->doc->_private; xmlXPathObjectPtr xpathObj = NULL; if (!xml_acl_enabled(xml)) { crm_trace("Skipping ACLs for user '%s' because not enabled for this XML", p->user); return; } for (aIter = p->acls; aIter != NULL; aIter = aIter->next) { int max = 0, lpc = 0; xml_acl_t *acl = aIter->data; xpathObj = xpath_search(xml, acl->xpath); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); char *path = xml_get_path(match); p = match->_private; crm_trace("Applying %s ACL to %s matched by %s", acl_to_text(acl->mode), path, acl->xpath); - -#ifdef SUSE_ACL_COMPAT - if (!pcmk_is_set(p->flags, acl->mode) - && pcmk_any_flags_set(p->flags, - xpf_acl_read|xpf_acl_write|xpf_acl_deny)) { - pcmk__config_warn("Configuration element %s is matched by " - "multiple ACL rules, only the first applies " - "('%s' wins over '%s')", - path, acl_to_text(p->flags), - acl_to_text(acl->mode)); - free(path); - continue; - } -#endif pcmk__set_xml_flags(p, acl->mode); free(path); } crm_trace("Applied %s ACL %s (%d match%s)", acl_to_text(acl->mode), acl->xpath, max, ((max == 1)? "" : "es")); freeXpathObject(xpathObj); } } /*! 
* \internal * \brief Unpack ACLs for a given user * * \param[in] source XML with ACL definitions * \param[in,out] target XML that ACLs will be applied to * \param[in] user Username whose ACLs need to be unpacked */ void pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user) { #if ENABLE_ACL xml_private_t *p = NULL; if ((target == NULL) || (target->doc == NULL) || (target->doc->_private == NULL)) { return; } p = target->doc->_private; if (!pcmk_acl_required(user)) { crm_trace("Not unpacking ACLs because not required for user '%s'", user); } else if (p->acls == NULL) { xmlNode *acls = get_xpath_object("//" XML_CIB_TAG_ACLS, source, LOG_NEVER); free(p->user); p->user = strdup(user); if (acls) { xmlNode *child = NULL; for (child = pcmk__xe_first_child(acls); child; child = pcmk__xe_next(child)) { const char *tag = crm_element_name(child); if (!strcmp(tag, XML_ACL_TAG_USER) || !strcmp(tag, XML_ACL_TAG_USERv1)) { const char *id = crm_element_value(child, XML_ATTR_ID); if (id && strcmp(id, user) == 0) { crm_debug("Unpacking ACLs for user '%s'", id); p->acls = parse_acl_entry(acls, child, p->acls); } } } } } #endif } static inline bool test_acl_mode(enum xml_private_flags allowed, enum xml_private_flags requested) { if (pcmk_is_set(allowed, xpf_acl_deny)) { return false; } else if (pcmk_all_flags_set(allowed, requested)) { return true; } else if (pcmk_is_set(requested, xpf_acl_read) && pcmk_is_set(allowed, xpf_acl_write)) { return true; } else if (pcmk_is_set(requested, xpf_acl_create) && pcmk_any_flags_set(allowed, xpf_acl_write|xpf_created)) { return true; } return false; } static bool purge_xml_attributes(xmlNode *xml) { xmlNode *child = NULL; xmlAttr *xIter = NULL; bool readable_children = false; xml_private_t *p = xml->_private; if (test_acl_mode(p->flags, xpf_acl_read)) { crm_trace("%s[@id=%s] is readable", crm_element_name(xml), ID(xml)); return true; } xIter = xml->properties; while (xIter != NULL) { xmlAttr *tmp = xIter; const char *prop_name = (const char *)xIter->name; xIter = xIter->next; if (strcmp(prop_name, XML_ATTR_ID) == 0) { continue; } xmlUnsetProp(xml, tmp->name); } child = pcmk__xml_first_child(xml); while ( child != NULL ) { xmlNode *tmp = child; child = pcmk__xml_next(child); readable_children |= purge_xml_attributes(tmp); } if (!readable_children) { free_xml(xml); /* Nothing readable under here, purge completely */ } return readable_children; } /*! 
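 * How test_acl_mode() above resolves a requested access against the flags
 * already accumulated on a node:
 *   - an explicit deny flag always refuses the request
 *   - otherwise the request is granted if all requested flags are set,
 *     if read is requested and write is allowed, or if create is requested
 *     and either write is allowed or the node itself was just created
 *   - anything else is refused
 *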
* \internal * \brief Copy ACL-allowed portions of specified XML * * \param[in] user Username whose ACLs should be used * \param[in] acl_source XML containing ACLs * \param[in] xml XML to be copied * \param[out] result Copy of XML portions readable via ACLs * * \return true if xml exists and ACLs are required for user, false otherwise * \note If this returns true, caller should use \p result rather than \p xml */ bool xml_acl_filtered_copy(const char *user, xmlNode *acl_source, xmlNode *xml, xmlNode **result) { GListPtr aIter = NULL; xmlNode *target = NULL; xml_private_t *doc = NULL; *result = NULL; if ((xml == NULL) || !pcmk_acl_required(user)) { crm_trace("Not filtering XML because ACLs not required for user '%s'", user); return false; } crm_trace("Filtering XML copy using user '%s' ACLs", user); target = copy_xml(xml); if (target == NULL) { return true; } pcmk__unpack_acl(acl_source, target, user); pcmk__set_xml_doc_flag(target, xpf_acl_enabled); pcmk__apply_acl(target); doc = target->doc->_private; for(aIter = doc->acls; aIter != NULL && target; aIter = aIter->next) { int max = 0; xml_acl_t *acl = aIter->data; if (acl->mode != xpf_acl_deny) { /* Nothing to do */ } else if (acl->xpath) { int lpc = 0; xmlXPathObjectPtr xpathObj = xpath_search(target, acl->xpath); max = numXpathResults(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); if (!purge_xml_attributes(match) && (match == target)) { crm_trace("ACLs deny user '%s' access to entire XML document", user); freeXpathObject(xpathObj); return true; } } crm_trace("ACLs deny user '%s' access to %s (%d %s)", user, acl->xpath, max, pcmk__plural_alt(max, "match", "matches")); freeXpathObject(xpathObj); } } if (!purge_xml_attributes(target)) { crm_trace("ACLs deny user '%s' access to entire XML document", user); return true; } if (doc->acls) { g_list_free_full(doc->acls, free_acl); doc->acls = NULL; } else { crm_trace("User '%s' without ACLs denied access to entire XML document", user); free_xml(target); target = NULL; } if (target) { *result = target; } return true; } /*! * \internal * \brief Check whether creation of an XML element is implicitly allowed * * Check whether XML is a "scaffolding" element whose creation is implicitly * allowed regardless of ACLs (that is, it is not in the ACL section and has * no attributes other than "id"). * * \param[in] xml XML element to check * * \return true if XML element is implicitly allowed, false otherwise */ static bool implicitly_allowed(xmlNode *xml) { char *path = NULL; for (xmlAttr *prop = xml->properties; prop != NULL; prop = prop->next) { if (strcmp((const char *) prop->name, XML_ATTR_ID) != 0) { return false; } } path = xml_get_path(xml); if (strstr(path, "/" XML_CIB_TAG_ACLS "/") != NULL) { free(path); return false; } free(path); return true; } #define display_id(xml) (ID(xml)? ID(xml) : "") /*! * \internal * \brief Drop XML nodes created in violation of ACLs * * Given an XML element, free all of its descendent nodes created in violation * of ACLs, with the exception of allowing "scaffolding" elements (i.e. those * that aren't in the ACL section and don't have any attributes other than * "id"). 
* * \param[in,out] xml XML to check * \param[in] check_top Whether to apply checks to argument itself * (if true, xml might get freed) */ void pcmk__apply_creation_acl(xmlNode *xml, bool check_top) { xml_private_t *p = xml->_private; if (pcmk_is_set(p->flags, xpf_created)) { if (implicitly_allowed(xml)) { crm_trace("Creation of <%s> scaffolding with id=\"%s\"" " is implicitly allowed", crm_element_name(xml), display_id(xml)); } else if (pcmk__check_acl(xml, NULL, xpf_acl_write)) { crm_trace("ACLs allow creation of <%s> with id=\"%s\"", crm_element_name(xml), display_id(xml)); } else if (check_top) { crm_trace("ACLs disallow creation of <%s> with id=\"%s\"", crm_element_name(xml), display_id(xml)); pcmk_free_xml_subtree(xml); return; } else { crm_notice("ACLs would disallow creation of %s<%s> with id=\"%s\" ", ((xml == xmlDocGetRootElement(xml->doc))? "root element " : ""), crm_element_name(xml), display_id(xml)); } } for (xmlNode *cIter = pcmk__xml_first_child(xml); cIter != NULL; ) { xmlNode *child = cIter; cIter = pcmk__xml_next(cIter); /* In case it is free'd */ pcmk__apply_creation_acl(child, true); } } bool xml_acl_denied(xmlNode *xml) { if (xml && xml->doc && xml->doc->_private){ xml_private_t *p = xml->doc->_private; return pcmk_is_set(p->flags, xpf_acl_denied); } return false; } void xml_acl_disable(xmlNode *xml) { if (xml_acl_enabled(xml)) { xml_private_t *p = xml->doc->_private; /* Catch anything that was created but shouldn't have been */ pcmk__apply_acl(xml); pcmk__apply_creation_acl(xml, false); pcmk__clear_xml_flags(p, xpf_acl_enabled); } } bool xml_acl_enabled(xmlNode *xml) { if (xml && xml->doc && xml->doc->_private){ xml_private_t *p = xml->doc->_private; return pcmk_is_set(p->flags, xpf_acl_enabled); } return false; } bool pcmk__check_acl(xmlNode *xml, const char *name, enum xml_private_flags mode) { CRM_ASSERT(xml); CRM_ASSERT(xml->doc); CRM_ASSERT(xml->doc->_private); #if ENABLE_ACL if (pcmk__tracking_xml_changes(xml, false) && xml_acl_enabled(xml)) { int offset = 0; xmlNode *parent = xml; char buffer[MAX_XPATH_LEN]; xml_private_t *docp = xml->doc->_private; offset = pcmk__element_xpath(NULL, xml, buffer, offset, sizeof(buffer)); if (name) { offset += snprintf(buffer + offset, MAX_XPATH_LEN - offset, "[@%s]", name); } CRM_LOG_ASSERT(offset > 0); if (docp->acls == NULL) { crm_trace("User '%s' without ACLs denied %s access to %s", docp->user, acl_to_text(mode), buffer); pcmk__set_xml_doc_flag(xml, xpf_acl_denied); return false; } /* Walk the tree upwards looking for xml_acl_* flags * - Creating an attribute requires write permissions for the node * - Creating a child requires write permissions for the parent */ if (name) { xmlAttr *attr = xmlHasProp(xml, (pcmkXmlStr) name); if (attr && mode == xpf_acl_create) { mode = xpf_acl_write; } } while (parent && parent->_private) { xml_private_t *p = parent->_private; if (test_acl_mode(p->flags, mode)) { return true; } else if (pcmk_is_set(p->flags, xpf_acl_deny)) { crm_trace("%sACL denies user '%s' %s access to %s", (parent != xml) ? "Parent " : "", docp->user, acl_to_text(mode), buffer); pcmk__set_xml_doc_flag(xml, xpf_acl_denied); return false; } parent = parent->parent; } crm_trace("Default ACL denies user '%s' %s access to %s", docp->user, acl_to_text(mode), buffer); pcmk__set_xml_doc_flag(xml, xpf_acl_denied); return false; } #endif return true; } /*! 
* \brief Check whether ACLs are required for a given user * * \param[in] user User name to check * * \return true if the user requires ACLs, false otherwise */ bool pcmk_acl_required(const char *user) { #if ENABLE_ACL if (pcmk__str_empty(user)) { crm_trace("ACLs not required because no user set"); return false; } else if (!strcmp(user, CRM_DAEMON_USER) || !strcmp(user, "root")) { crm_trace("ACLs not required for privileged user %s", user); return false; } crm_trace("ACLs required for %s", user); return true; #else crm_trace("ACLs not required because not supported by this build"); return false; #endif } #if ENABLE_ACL char * pcmk__uid2username(uid_t uid) { struct passwd *pwent = getpwuid(uid); if (pwent == NULL) { crm_perror(LOG_INFO, "Cannot get user details for user ID %d", uid); return NULL; } return strdup(pwent->pw_name); } /*! * \internal * \brief Set the ACL user field properly on an XML request * * Multiple user names are potentially involved in an XML request: the effective * user of the current process; the user name known from an IPC client * connection; and the user name obtained from the request itself, whether by * the current standard XML attribute name or an older legacy attribute name. * This function chooses the appropriate one that should be used for ACLs, sets * it in the request (using the standard attribute name, and the legacy name if * given), and returns it. * * \param[in,out] request XML request to update * \param[in] field Alternate name for ACL user name XML attribute * \param[in] peer_user User name as known from IPC connection * * \return ACL user name actually used */ const char * pcmk__update_acl_user(xmlNode *request, const char *field, const char *peer_user) { static const char *effective_user = NULL; const char *requested_user = NULL; const char *user = NULL; if (effective_user == NULL) { effective_user = pcmk__uid2username(geteuid()); if (effective_user == NULL) { effective_user = strdup("#unprivileged"); CRM_CHECK(effective_user != NULL, return NULL); crm_err("Unable to determine effective user, assuming unprivileged for ACLs"); } } requested_user = crm_element_value(request, XML_ACL_TAG_USER); if (requested_user == NULL) { /* @COMPAT rolling upgrades <=1.1.11 * * field is checked for backward compatibility with older versions that * did not use XML_ACL_TAG_USER.
*/ requested_user = crm_element_value(request, field); } if (!pcmk__is_privileged(effective_user)) { /* We're not running as a privileged user, set or overwrite any existing * value for $XML_ACL_TAG_USER */ user = effective_user; } else if (peer_user == NULL && requested_user == NULL) { /* No user known or requested, use 'effective_user' and make sure one is * set for the request */ user = effective_user; } else if (peer_user == NULL) { /* No user known, trusting 'requested_user' */ user = requested_user; } else if (!pcmk__is_privileged(peer_user)) { /* The peer is not a privileged user, set or overwrite any existing * value for $XML_ACL_TAG_USER */ user = peer_user; } else if (requested_user == NULL) { /* Even if we're privileged, make sure there is always a value set */ user = peer_user; } else { /* Legal delegation to 'requested_user' */ user = requested_user; } // This requires pointer comparison, not string comparison if (user != crm_element_value(request, XML_ACL_TAG_USER)) { crm_xml_add(request, XML_ACL_TAG_USER, user); } if (field != NULL && user != crm_element_value(request, field)) { crm_xml_add(request, field, user); } return requested_user; } #endif diff --git a/lib/common/ipc_client.c b/lib/common/ipc_client.c index 1029d46bc7..f34f791282 100644 --- a/lib/common/ipc_client.c +++ b/lib/common/ipc_client.c @@ -1,1462 +1,1459 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #if defined(US_AUTH_PEERCRED_UCRED) || defined(US_AUTH_PEERCRED_SOCKPEERCRED) # ifdef US_AUTH_PEERCRED_UCRED # ifndef _GNU_SOURCE # define _GNU_SOURCE # endif # endif # include #elif defined(US_AUTH_GETPEERUCRED) # include #endif #include #include #include #include #include /* indirectly: pcmk_err_generic */ #include #include #include #include "crmcommon_private.h" /*! * \brief Create a new object for using Pacemaker daemon IPC * * \param[out] api Where to store new IPC object * \param[in] server Which Pacemaker daemon the object is for * * \return Standard Pacemaker result code * * \note The caller is responsible for freeing *api using pcmk_free_ipc_api(). * \note This is intended to supersede crm_ipc_new() but currently only * supports the controller & pacemakerd IPC API. */ int pcmk_new_ipc_api(pcmk_ipc_api_t **api, enum pcmk_ipc_server server) { if (api == NULL) { return EINVAL; } *api = calloc(1, sizeof(pcmk_ipc_api_t)); if (*api == NULL) { return errno; } (*api)->server = server; if (pcmk_ipc_name(*api, false) == NULL) { pcmk_free_ipc_api(*api); *api = NULL; return EOPNOTSUPP; } (*api)->ipc_size_max = 0; // Set server methods and max_size (if not default) switch (server) { case pcmk_ipc_attrd: break; case pcmk_ipc_based: (*api)->ipc_size_max = 512 * 1024; // 512KB break; case pcmk_ipc_controld: (*api)->cmds = pcmk__controld_api_methods(); break; case pcmk_ipc_execd: break; case pcmk_ipc_fenced: break; case pcmk_ipc_pacemakerd: (*api)->cmds = pcmk__pacemakerd_api_methods(); break; case pcmk_ipc_schedulerd: // @TODO max_size could vary by client, maybe take as argument? 
(*api)->ipc_size_max = 5 * 1024 * 1024; // 5MB break; } if ((*api)->cmds == NULL) { pcmk_free_ipc_api(*api); *api = NULL; return ENOMEM; } (*api)->ipc = crm_ipc_new(pcmk_ipc_name(*api, false), (*api)->ipc_size_max); if ((*api)->ipc == NULL) { pcmk_free_ipc_api(*api); *api = NULL; return ENOMEM; } // If daemon API has its own data to track, allocate it if ((*api)->cmds->new_data != NULL) { if ((*api)->cmds->new_data(*api) != pcmk_rc_ok) { pcmk_free_ipc_api(*api); *api = NULL; return ENOMEM; } } crm_trace("Created %s API IPC object", pcmk_ipc_name(*api, true)); return pcmk_rc_ok; } static void free_daemon_specific_data(pcmk_ipc_api_t *api) { if ((api != NULL) && (api->cmds != NULL)) { if ((api->cmds->free_data != NULL) && (api->api_data != NULL)) { api->cmds->free_data(api->api_data); api->api_data = NULL; } free(api->cmds); api->cmds = NULL; } } /*! * \internal * \brief Call an IPC API event callback, if one is registed * * \param[in] api IPC API connection * \param[in] event_type The type of event that occurred * \param[in] status Event status * \param[in] event_data Event-specific data */ void pcmk__call_ipc_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data) { if ((api != NULL) && (api->cb != NULL)) { api->cb(api, event_type, status, event_data, api->user_data); } } /*! * \internal * \brief Clean up after an IPC disconnect * * \param[in] user_data IPC API connection that disconnected * * \note This function can be used as a main loop IPC destroy callback. */ static void ipc_post_disconnect(gpointer user_data) { pcmk_ipc_api_t *api = user_data; crm_info("Disconnected from %s IPC API", pcmk_ipc_name(api, true)); // Perform any daemon-specific handling needed if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) { api->cmds->post_disconnect(api); } // Call client's registered event callback pcmk__call_ipc_callback(api, pcmk_ipc_event_disconnect, CRM_EX_DISCONNECT, NULL); /* If this is being called from a running main loop, mainloop_gio_destroy() * will free ipc and mainloop_io immediately after calling this function. * If this is called from a stopped main loop, these will leak, so the best * practice is to close the connection before stopping the main loop. */ api->ipc = NULL; api->mainloop_io = NULL; if (api->free_on_disconnect) { /* pcmk_free_ipc_api() has already been called, but did not free api * or api->cmds because this function needed them. Do that now. */ free_daemon_specific_data(api); crm_trace("Freeing IPC API object after disconnect"); free(api); } } /*! * \brief Free the contents of an IPC API object * * \param[in] api IPC API object to free */ void pcmk_free_ipc_api(pcmk_ipc_api_t *api) { bool free_on_disconnect = false; if (api == NULL) { return; } crm_debug("Releasing %s IPC API", pcmk_ipc_name(api, true)); if (api->ipc != NULL) { if (api->mainloop_io != NULL) { /* We need to keep the api pointer itself around, because it is the * user data for the IPC client destroy callback. That will be * triggered by the pcmk_disconnect_ipc() call below, but it might * happen later in the main loop (if still running). * * This flag tells the destroy callback to free the object. It can't * do that unconditionally, because the application might call this * function after a disconnect that happened by other means. 
*/ free_on_disconnect = api->free_on_disconnect = true; } pcmk_disconnect_ipc(api); // Frees api if free_on_disconnect is true } if (!free_on_disconnect) { free_daemon_specific_data(api); crm_trace("Freeing IPC API object"); free(api); } } /*! * \brief Get the IPC name used with an IPC API connection * * \param[in] api IPC API connection * \param[in] for_log If true, return human-friendly name instead of IPC name * * \return IPC API's human-friendly or connection name, or if none is available, * "Pacemaker" if for_log is true and NULL if for_log is false */ const char * pcmk_ipc_name(pcmk_ipc_api_t *api, bool for_log) { if (api == NULL) { return for_log? "Pacemaker" : NULL; } switch (api->server) { case pcmk_ipc_attrd: return for_log? "attribute manager" : NULL /* T_ATTRD */; case pcmk_ipc_based: return for_log? "CIB manager" : NULL /* PCMK__SERVER_BASED_RW */; case pcmk_ipc_controld: return for_log? "controller" : CRM_SYSTEM_CRMD; case pcmk_ipc_execd: return for_log? "executor" : NULL /* CRM_SYSTEM_LRMD */; case pcmk_ipc_fenced: return for_log? "fencer" : NULL /* "stonith-ng" */; case pcmk_ipc_pacemakerd: return for_log? "launcher" : CRM_SYSTEM_MCP; case pcmk_ipc_schedulerd: return for_log? "scheduler" : NULL /* CRM_SYSTEM_PENGINE */; default: return for_log? "Pacemaker" : NULL; } } /*! * \brief Check whether an IPC API connection is active * * \param[in] api IPC API connection * * \return true if IPC is connected, false otherwise */ bool pcmk_ipc_is_connected(pcmk_ipc_api_t *api) { return (api != NULL) && crm_ipc_connected(api->ipc); } /*! * \internal * \brief Call the daemon-specific API's dispatch function * * Perform daemon-specific handling of IPC reply dispatch. It is the daemon * method's responsibility to call the client's registered event callback, as * well as allocate and free any event data. * * \param[in] api IPC API connection */ static void call_api_dispatch(pcmk_ipc_api_t *api, xmlNode *message) { crm_log_xml_trace(message, "ipc-received"); if ((api->cmds != NULL) && (api->cmds->dispatch != NULL)) { api->cmds->dispatch(api, message); } } /*! * \internal * \brief Dispatch data read from IPC source * * \param[in] buffer Data read from IPC * \param[in] length Number of bytes of data in buffer (ignored) * \param[in] user_data IPC object * * \return Always 0 (meaning connection is still required) * * \note This function can be used as a main loop IPC dispatch callback. */ static int dispatch_ipc_data(const char *buffer, ssize_t length, gpointer user_data) { pcmk_ipc_api_t *api = user_data; xmlNode *msg; CRM_CHECK(api != NULL, return 0); if (buffer == NULL) { crm_warn("Empty message received from %s IPC", pcmk_ipc_name(api, true)); return 0; } msg = string2xml(buffer); if (msg == NULL) { crm_warn("Malformed message received from %s IPC", pcmk_ipc_name(api, true)); return 0; } call_api_dispatch(api, msg); free_xml(msg); return 0; } /*! * \brief Check whether an IPC connection has data available (without main loop) * * \param[in] api IPC API connection * \param[in] timeout_ms If less than 0, poll indefinitely; if 0, poll once * and return immediately; otherwise, poll for up to * this many milliseconds * * \return Standard Pacemaker return code * * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call * this function to check whether IPC data is available. Return values of * interest include pcmk_rc_ok meaning data is available, and EAGAIN * meaning no data is available; all other values indicate errors. 
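/*
 * A minimal sketch of the polling usage pattern described in the note above
 * (an assumed caller, not part of this patch).  A callback registered with
 * pcmk_register_ipc_callback() would receive whatever pcmk_dispatch_ipc()
 * dispatches.
 */
pcmk_ipc_api_t *api = NULL;

if ((pcmk_new_ipc_api(&api, pcmk_ipc_controld) == pcmk_rc_ok)
    && (pcmk_connect_ipc(api, pcmk_ipc_dispatch_poll) == pcmk_rc_ok)) {

    int rc = pcmk_poll_ipc(api, 1000);  // wait up to 1 second for data

    if (rc == pcmk_rc_ok) {
        pcmk_dispatch_ipc(api);         // data available: dispatch it
    } else if (rc == EAGAIN) {
        // no data available yet; poll again later
    } else {
        // any other value indicates an error
    }
}
pcmk_free_ipc_api(api);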
* \todo This does not allow the caller to poll multiple file descriptors at * once. If there is demand for that, we could add a wrapper for * crm_ipc_get_fd(api->ipc), so the caller can call poll() themselves. */ int pcmk_poll_ipc(pcmk_ipc_api_t *api, int timeout_ms) { int rc; struct pollfd pollfd = { 0, }; if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) { return EINVAL; } pollfd.fd = crm_ipc_get_fd(api->ipc); pollfd.events = POLLIN; rc = poll(&pollfd, 1, timeout_ms); if (rc < 0) { return errno; } else if (rc == 0) { return EAGAIN; } return pcmk_rc_ok; } /*! * \brief Dispatch available messages on an IPC connection (without main loop) * * \param[in] api IPC API connection * * \return Standard Pacemaker return code * * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call * this function when IPC data is available. */ void pcmk_dispatch_ipc(pcmk_ipc_api_t *api) { if (api == NULL) { return; } while (crm_ipc_ready(api->ipc) > 0) { if (crm_ipc_read(api->ipc) > 0) { dispatch_ipc_data(crm_ipc_buffer(api->ipc), 0, api); } } } // \return Standard Pacemaker return code static int connect_with_main_loop(pcmk_ipc_api_t *api) { int rc; struct ipc_client_callbacks callbacks = { .dispatch = dispatch_ipc_data, .destroy = ipc_post_disconnect, }; rc = pcmk__add_mainloop_ipc(api->ipc, G_PRIORITY_DEFAULT, api, &callbacks, &(api->mainloop_io)); if (rc != pcmk_rc_ok) { return rc; } crm_debug("Connected to %s IPC (attached to main loop)", pcmk_ipc_name(api, true)); /* After this point, api->mainloop_io owns api->ipc, so api->ipc * should not be explicitly freed. */ return pcmk_rc_ok; } // \return Standard Pacemaker return code static int connect_without_main_loop(pcmk_ipc_api_t *api) { int rc; if (!crm_ipc_connect(api->ipc)) { rc = errno; crm_ipc_close(api->ipc); return rc; } crm_debug("Connected to %s IPC (without main loop)", pcmk_ipc_name(api, true)); return pcmk_rc_ok; } /*! * \brief Connect to a Pacemaker daemon via IPC * * \param[in] api IPC API instance * \param[out] dispatch_type How IPC replies should be dispatched * * \return Standard Pacemaker return code */ int pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type) { int rc = pcmk_rc_ok; if (api == NULL) { crm_err("Cannot connect to uninitialized API object"); return EINVAL; } if (api->ipc == NULL) { api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), api->ipc_size_max); if (api->ipc == NULL) { crm_err("Failed to re-create IPC API"); return ENOMEM; } } if (crm_ipc_connected(api->ipc)) { crm_trace("Already connected to %s IPC API", pcmk_ipc_name(api, true)); return pcmk_rc_ok; } api->dispatch_type = dispatch_type; switch (dispatch_type) { case pcmk_ipc_dispatch_main: rc = connect_with_main_loop(api); break; case pcmk_ipc_dispatch_sync: case pcmk_ipc_dispatch_poll: rc = connect_without_main_loop(api); break; } if (rc != pcmk_rc_ok) { return rc; } if ((api->cmds != NULL) && (api->cmds->post_connect != NULL)) { rc = api->cmds->post_connect(api); if (rc != pcmk_rc_ok) { crm_ipc_close(api->ipc); } } return rc; } /*! * \brief Disconnect an IPC API instance * * \param[in] api IPC API connection * * \return Standard Pacemaker return code * * \note If the connection is attached to a main loop, this function should be * called before quitting the main loop, to ensure that all memory is * freed. 
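/*
 * Sketch of the main-loop dispatch variant (assumed usage, not part of this
 * patch).  The callback name, its parameter names, and the surrounding code
 * are placeholders; the signature follows how pcmk__call_ipc_callback()
 * above invokes the registered callback.
 */
static void
my_ipc_events(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
              crm_exit_t status, void *event_data, void *user_data)
{
    if (event_type == pcmk_ipc_event_disconnect) {
        /* ... handle loss of connection ... */
    }
}

/* ... during start-up ... */
pcmk_ipc_api_t *api = NULL;

if (pcmk_new_ipc_api(&api, pcmk_ipc_pacemakerd) == pcmk_rc_ok) {
    pcmk_register_ipc_callback(api, my_ipc_events, NULL);

    if (pcmk_connect_ipc(api, pcmk_ipc_dispatch_main) == pcmk_rc_ok) {
        /* ... run the GLib main loop; events arrive via my_ipc_events() ... */

        /* Per the note above, disconnect before quitting the main loop so
         * that all memory is freed, then release the API object. */
        pcmk_disconnect_ipc(api);
    }
    pcmk_free_ipc_api(api);
}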
*/ void pcmk_disconnect_ipc(pcmk_ipc_api_t *api) { if ((api == NULL) || (api->ipc == NULL)) { return; } switch (api->dispatch_type) { case pcmk_ipc_dispatch_main: { mainloop_io_t *mainloop_io = api->mainloop_io; // Make sure no code with access to api can use these again api->mainloop_io = NULL; api->ipc = NULL; mainloop_del_ipc_client(mainloop_io); // After this point api might have already been freed } break; case pcmk_ipc_dispatch_poll: case pcmk_ipc_dispatch_sync: { crm_ipc_t *ipc = api->ipc; // Make sure no code with access to api can use ipc again api->ipc = NULL; // This should always be the case already, but to be safe api->free_on_disconnect = false; crm_ipc_destroy(ipc); ipc_post_disconnect(api); } break; } } /*! * \brief Register a callback for IPC API events * * \param[in] api IPC API connection * \param[in] callback Callback to register * \param[in] userdata Caller data to pass to callback * * \note This function may be called multiple times to update the callback * and/or user data. The caller remains responsible for freeing * userdata in any case (after the IPC is disconnected, if the * user data is still registered with the IPC). */ void pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb, void *user_data) { if (api == NULL) { return; } api->cb = cb; api->user_data = user_data; } /*! * \internal * \brief Send an XML request across an IPC API connection * * \param[in] api IPC API connection * \param[in] request XML request to send * * \return Standard Pacemaker return code * * \note Daemon-specific IPC API functions should call this function to send * requests, because it handles different dispatch types appropriately. */ int pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request) { int rc; xmlNode *reply = NULL; enum crm_ipc_flags flags = crm_ipc_flags_none; if ((api == NULL) || (api->ipc == NULL) || (request == NULL)) { return EINVAL; } crm_log_xml_trace(request, "ipc-sent"); // Synchronous dispatch requires waiting for a reply if ((api->dispatch_type == pcmk_ipc_dispatch_sync) && (api->cmds != NULL) && (api->cmds->reply_expected != NULL) && (api->cmds->reply_expected(api, request))) { flags = crm_ipc_client_response; } // The 0 here means a default timeout of 5 seconds rc = crm_ipc_send(api->ipc, request, flags, 0, &reply); if (rc < 0) { return pcmk_legacy2rc(rc); } else if (rc == 0) { return ENODATA; } // With synchronous dispatch, we dispatch any reply now if (reply != NULL) { call_api_dispatch(api, reply); free_xml(reply); } return pcmk_rc_ok; } /*! * \internal * \brief Create the XML for an IPC request to purge a node from the peer cache * * \param[in] api IPC API connection * \param[in] node_name If not NULL, name of node to purge * \param[in] nodeid If not 0, node ID of node to purge * * \return Newly allocated IPC request XML * * \note The controller, fencer, and pacemakerd use the same request syntax, but * the attribute manager uses a different one. The CIB manager doesn't * have any syntax for it. The executor and scheduler don't connect to the * cluster layer and thus don't have or need any syntax for it. * * \todo Modify the attribute manager to accept the common syntax (as well * as its current one, for compatibility with older clients). Modify * the CIB manager to accept and honor the common syntax. Modify the * executor and scheduler to accept the syntax (immediately returning * success), just for consistency. Modify this function to use the * common syntax with all daemons if their version supports it. 
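/*
 * Sketch of an assumed caller of the public wrapper defined below (not part
 * of this patch): ask the controller to drop a node from its peer cache.
 * The node name is a placeholder; per the wrapper's documentation, at least
 * one of node name or node ID must be given.
 */
pcmk_ipc_api_t *api = NULL;

if ((pcmk_new_ipc_api(&api, pcmk_ipc_controld) == pcmk_rc_ok)
    && (pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync) == pcmk_rc_ok)) {

    int rc = pcmk_ipc_purge_node(api, "old-node", 0);

    // rc is a standard Pacemaker return code
}
pcmk_free_ipc_api(api);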
*/ static xmlNode * create_purge_node_request(pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid) { xmlNode *request = NULL; const char *client = crm_system_name? crm_system_name : "client"; switch (api->server) { case pcmk_ipc_attrd: request = create_xml_node(NULL, __func__); crm_xml_add(request, F_TYPE, T_ATTRD); crm_xml_add(request, F_ORIG, crm_system_name); crm_xml_add(request, PCMK__XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE); crm_xml_add(request, PCMK__XA_ATTR_NODE_NAME, node_name); if (nodeid > 0) { crm_xml_add_int(request, PCMK__XA_ATTR_NODE_ID, (int) nodeid); } break; case pcmk_ipc_controld: case pcmk_ipc_fenced: case pcmk_ipc_pacemakerd: request = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, pcmk_ipc_name(api, false), client, NULL); if (nodeid > 0) { crm_xml_set_id(request, "%lu", (unsigned long) nodeid); } crm_xml_add(request, XML_ATTR_UNAME, node_name); break; case pcmk_ipc_based: case pcmk_ipc_execd: case pcmk_ipc_schedulerd: break; } return request; } /*! * \brief Ask a Pacemaker daemon to purge a node from its peer cache * * \param[in] api IPC API connection * \param[in] node_name If not NULL, name of node to purge * \param[in] nodeid If not 0, node ID of node to purge * * \return Standard Pacemaker return code * * \note At least one of node_name or nodeid must be specified. */ int pcmk_ipc_purge_node(pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid) { int rc = 0; xmlNode *request = NULL; if (api == NULL) { return EINVAL; } if ((node_name == NULL) && (nodeid == 0)) { return EINVAL; } request = create_purge_node_request(api, node_name, nodeid); if (request == NULL) { return EOPNOTSUPP; } rc = pcmk__send_ipc_request(api, request); free_xml(request); crm_debug("%s peer cache purge of node %s[%lu]: rc=%d", pcmk_ipc_name(api, true), node_name, (unsigned long) nodeid, rc); return rc; } /* * Generic IPC API (to eventually be deprecated as public API and made internal) */ struct crm_ipc_s { struct pollfd pfd; unsigned int max_buf_size; // maximum bytes we can send or receive over IPC unsigned int buf_size; // size of allocated buffer int msg_size; int need_reply; char *buffer; char *name; qb_ipcc_connection_t *ipc; }; /*! * \brief Create a new (legacy) object for using Pacemaker daemon IPC * * \param[in] name IPC system name to connect to * \param[in] max_size Use a maximum IPC buffer size of at least this size * * \return Newly allocated IPC object on success, NULL otherwise * * \note The caller is responsible for freeing the result using * crm_ipc_destroy(). * \note This should be considered deprecated for use with daemons supported by * pcmk_new_ipc_api(). */ crm_ipc_t * crm_ipc_new(const char *name, size_t max_size) { crm_ipc_t *client = NULL; client = calloc(1, sizeof(crm_ipc_t)); if (client == NULL) { crm_err("Could not create IPC connection: %s", strerror(errno)); return NULL; } client->name = strdup(name); if (client->name == NULL) { crm_err("Could not create IPC connection: %s", strerror(errno)); free(client); return NULL; } client->buf_size = pcmk__ipc_buffer_size(max_size); client->buffer = malloc(client->buf_size); if (client->buffer == NULL) { crm_err("Could not create IPC connection: %s", strerror(errno)); free(client->name); free(client); return NULL; } /* Clients initiating connection pick the max buf size */ client->max_buf_size = client->buf_size; client->pfd.fd = -1; client->pfd.events = POLLIN; client->pfd.revents = 0; return client; } /*! 
* \brief Establish an IPC connection to a Pacemaker component * * \param[in] client Connection instance obtained from crm_ipc_new() * * \return TRUE on success, FALSE otherwise (in which case errno will be set; * specifically, in case of discovering the remote side is not * authentic, its value is set to ECONNABORTED). */ bool crm_ipc_connect(crm_ipc_t * client) { uid_t cl_uid = 0; gid_t cl_gid = 0; pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; int rv; client->need_reply = FALSE; client->ipc = qb_ipcc_connect(client->name, client->buf_size); if (client->ipc == NULL) { crm_debug("Could not establish %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); return FALSE; } client->pfd.fd = crm_ipc_get_fd(client); if (client->pfd.fd < 0) { rv = errno; /* message already omitted */ crm_ipc_close(client); errno = rv; return FALSE; } rv = pcmk_daemon_user(&cl_uid, &cl_gid); if (rv < 0) { /* message already omitted */ crm_ipc_close(client); errno = -rv; return FALSE; } if ((rv = pcmk__crm_ipc_is_authentic_process(client->ipc, client->pfd.fd, cl_uid, cl_gid, &found_pid, &found_uid, &found_gid)) == pcmk_rc_ipc_unauthorized) { crm_err("Daemon (IPC %s) is not authentic:" " process %lld (uid: %lld, gid: %lld)", client->name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), (long long) found_uid, (long long) found_gid); crm_ipc_close(client); errno = ECONNABORTED; return FALSE; } else if (rv != pcmk_rc_ok) { crm_perror(LOG_ERR, "Could not verify authenticity of daemon (IPC %s)", client->name); crm_ipc_close(client); if (rv > 0) { errno = rv; } else { errno = ENOTCONN; } return FALSE; } qb_ipcc_context_set(client->ipc, client); -#ifdef HAVE_IPCS_GET_BUFFER_SIZE client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc); if (client->max_buf_size > client->buf_size) { free(client->buffer); client->buffer = calloc(1, client->max_buf_size); client->buf_size = client->max_buf_size; } -#endif - return TRUE; } void crm_ipc_close(crm_ipc_t * client) { if (client) { if (client->ipc) { qb_ipcc_connection_t *ipc = client->ipc; client->ipc = NULL; qb_ipcc_disconnect(ipc); } } } void crm_ipc_destroy(crm_ipc_t * client) { if (client) { if (client->ipc && qb_ipcc_is_connected(client->ipc)) { crm_notice("Destroying an active IPC connection to %s", client->name); /* The next line is basically unsafe * * If this connection was attached to mainloop and mainloop is active, * the 'disconnected' callback will end up back here and we'll end * up free'ing the memory twice - something that can still happen * even without this if we destroy a connection and it closes before * we call exit */ /* crm_ipc_close(client); */ } crm_trace("Destroying IPC connection to %s: %p", client->name, client); free(client->buffer); free(client->name); free(client); } } int crm_ipc_get_fd(crm_ipc_t * client) { int fd = 0; if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) { return fd; } errno = EINVAL; crm_perror(LOG_ERR, "Could not obtain file IPC descriptor for %s", (client? client->name : "unspecified client")); return -errno; } bool crm_ipc_connected(crm_ipc_t * client) { bool rc = FALSE; if (client == NULL) { crm_trace("No client"); return FALSE; } else if (client->ipc == NULL) { crm_trace("No connection"); return FALSE; } else if (client->pfd.fd < 0) { crm_trace("Bad descriptor"); return FALSE; } rc = qb_ipcc_is_connected(client->ipc); if (rc == FALSE) { client->pfd.fd = -EINVAL; } return rc; } /*! 
* \brief Check whether an IPC connection is ready to be read * * \param[in] client Connection to check * * \return Positive value if ready to be read, 0 if not ready, -errno on error */ int crm_ipc_ready(crm_ipc_t *client) { int rc; CRM_ASSERT(client != NULL); if (crm_ipc_connected(client) == FALSE) { return -ENOTCONN; } client->pfd.revents = 0; rc = poll(&(client->pfd), 1, 0); return (rc < 0)? -errno : rc; } // \return Standard Pacemaker return code static int crm_ipc_decompress(crm_ipc_t * client) { pcmk__ipc_header_t *header = (pcmk__ipc_header_t *)(void*)client->buffer; if (header->size_compressed) { int rc = 0; unsigned int size_u = 1 + header->size_uncompressed; /* never let buf size fall below our max size required for ipc reads. */ unsigned int new_buf_size = QB_MAX((sizeof(pcmk__ipc_header_t) + size_u), client->max_buf_size); char *uncompressed = calloc(1, new_buf_size); crm_trace("Decompressing message data %u bytes into %u bytes", header->size_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed + sizeof(pcmk__ipc_header_t), &size_u, client->buffer + sizeof(pcmk__ipc_header_t), header->size_compressed, 1, 0); if (rc != BZ_OK) { crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", bz2_strerror(rc), rc); free(uncompressed); return EILSEQ; } /* * This assert no longer holds true. For an identical msg, some clients may * require compression, and others may not. If that same msg (event) is sent * to multiple clients, it could result in some clients receiving a compressed * msg even though compression was not explicitly required for them. * * CRM_ASSERT((header->size_uncompressed + sizeof(pcmk__ipc_header_t)) >= ipc_buffer_max); */ CRM_ASSERT(size_u == header->size_uncompressed); memcpy(uncompressed, client->buffer, sizeof(pcmk__ipc_header_t)); /* Preserve the header */ header = (pcmk__ipc_header_t *)(void*)uncompressed; free(client->buffer); client->buf_size = new_buf_size; client->buffer = uncompressed; } CRM_ASSERT(client->buffer[sizeof(pcmk__ipc_header_t) + header->size_uncompressed - 1] == 0); return pcmk_rc_ok; } long crm_ipc_read(crm_ipc_t * client) { pcmk__ipc_header_t *header = NULL; CRM_ASSERT(client != NULL); CRM_ASSERT(client->ipc != NULL); CRM_ASSERT(client->buffer != NULL); client->buffer[0] = 0; client->msg_size = qb_ipcc_event_recv(client->ipc, client->buffer, client->buf_size, 0); if (client->msg_size >= 0) { int rc = crm_ipc_decompress(client); if (rc != pcmk_rc_ok) { return pcmk_rc2legacy(rc); } header = (pcmk__ipc_header_t *)(void*)client->buffer; if (!pcmk__valid_ipc_header(header)) { return -EBADMSG; } crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s", client->name, header->qb.id, header->qb.size, client->msg_size, client->buffer + sizeof(pcmk__ipc_header_t)); } else { crm_trace("No message from %s received: %s", client->name, pcmk_strerror(client->msg_size)); } if (crm_ipc_connected(client) == FALSE || client->msg_size == -ENOTCONN) { crm_err("Connection to %s failed", client->name); } if (header) { /* Data excluding the header */ return header->size_uncompressed; } return -ENOMSG; } const char * crm_ipc_buffer(crm_ipc_t * client) { CRM_ASSERT(client != NULL); return client->buffer + sizeof(pcmk__ipc_header_t); } uint32_t crm_ipc_buffer_flags(crm_ipc_t * client) { pcmk__ipc_header_t *header = NULL; CRM_ASSERT(client != NULL); if (client->buffer == NULL) { return 0; } header = (pcmk__ipc_header_t *)(void*)client->buffer; return header->flags; } const char * crm_ipc_name(crm_ipc_t * client) { CRM_ASSERT(client != NULL); return 
client->name; } // \return Standard Pacemaker return code static int internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout, ssize_t *bytes) { time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); int rc = pcmk_rc_ok; /* get the reply */ crm_trace("client %s waiting on reply to msg id %d", client->name, request_id); do { *bytes = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 1000); if (*bytes > 0) { pcmk__ipc_header_t *hdr = NULL; rc = crm_ipc_decompress(client); if (rc != pcmk_rc_ok) { return rc; } hdr = (pcmk__ipc_header_t *)(void*)client->buffer; if (hdr->qb.id == request_id) { /* Got it */ break; } else if (hdr->qb.id < request_id) { xmlNode *bad = string2xml(crm_ipc_buffer(client)); crm_err("Discarding old reply %d (need %d)", hdr->qb.id, request_id); crm_log_xml_notice(bad, "OldIpcReply"); } else { xmlNode *bad = string2xml(crm_ipc_buffer(client)); crm_err("Discarding newer reply %d (need %d)", hdr->qb.id, request_id); crm_log_xml_notice(bad, "ImpossibleReply"); CRM_ASSERT(hdr->qb.id <= request_id); } } else if (crm_ipc_connected(client) == FALSE) { crm_err("Server disconnected client %s while waiting for msg id %d", client->name, request_id); break; } } while (time(NULL) < timeout); if (*bytes < 0) { rc = (int) -*bytes; // System errno } return rc; } /*! * \brief Send an IPC XML message * * \param[in] client Connection to IPC server * \param[in] message XML message to send * \param[in] flags Bitmask of crm_ipc_flags * \param[in] ms_timeout Give up if not sent within this much time * (5 seconds if 0, or no timeout if negative) * \param[out] reply Reply from server (or NULL if none) * * \return Negative errno on error, otherwise size of reply received in bytes * if reply was needed, otherwise number of bytes sent */ int crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode ** reply) { int rc = 0; ssize_t qb_rc = 0; ssize_t bytes = 0; struct iovec *iov; static uint32_t id = 0; static int factor = 8; pcmk__ipc_header_t *header; if (client == NULL) { crm_notice("Can't send IPC request without connection (bug?): %.100s", message); return -ENOTCONN; } else if (crm_ipc_connected(client) == FALSE) { /* Don't even bother */ crm_notice("Can't send IPC request to %s: Connection closed", client->name); return -ENOTCONN; } if (ms_timeout == 0) { ms_timeout = 5000; } if (client->need_reply) { qb_rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, ms_timeout); if (qb_rc < 0) { crm_warn("Sending IPC to %s disabled until pending reply received", client->name); return -EALREADY; } else { crm_notice("Sending IPC to %s re-enabled after pending reply received", client->name); client->need_reply = FALSE; } } id++; CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */ rc = pcmk__ipc_prepare_iov(id, message, client->max_buf_size, &iov, &bytes); if (rc != pcmk_rc_ok) { crm_warn("Couldn't prepare IPC request to %s: %s " CRM_XS " rc=%d", client->name, pcmk_rc_str(rc), rc); return pcmk_rc2legacy(rc); } header = iov[0].iov_base; pcmk__set_ipc_flags(header->flags, client->name, flags); if (pcmk_is_set(flags, crm_ipc_proxied)) { /* Don't look for a synchronous response */ pcmk__clear_ipc_flags(flags, "client", crm_ipc_client_response); } if(header->size_compressed) { if(factor < 10 && (client->max_buf_size / 10) < (bytes / factor)) { crm_notice("Compressed message exceeds %d0%% of configured IPC " "limit (%u bytes); consider setting PCMK_ipc_buffer to " "%u or higher", factor, client->max_buf_size, 2 * 
client->max_buf_size); factor++; } } crm_trace("Sending %s IPC request %d of %u bytes using %dms timeout", client->name, header->qb.id, header->qb.size, ms_timeout); if ((ms_timeout > 0) || !pcmk_is_set(flags, crm_ipc_client_response)) { time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); do { /* @TODO Is this check really needed? Won't qb_ipcc_sendv() return * an error if it's not connected? */ if (!crm_ipc_connected(client)) { goto send_cleanup; } qb_rc = qb_ipcc_sendv(client->ipc, iov, 2); } while ((qb_rc == -EAGAIN) && (time(NULL) < timeout)); rc = (int) qb_rc; // Negative of system errno, or bytes sent if (qb_rc <= 0) { goto send_cleanup; } else if (!pcmk_is_set(flags, crm_ipc_client_response)) { crm_trace("Not waiting for reply to %s IPC request %d", client->name, header->qb.id); goto send_cleanup; } rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout, &bytes); if (rc != pcmk_rc_ok) { /* We didn't get the reply in time, so disable future sends for now. * The only alternative would be to close the connection since we * don't know how to detect and discard out-of-sequence replies. * * @TODO Implement out-of-sequence detection */ client->need_reply = TRUE; } rc = (int) bytes; // Negative system errno, or size of reply received } else { // No timeout, and client response needed do { qb_rc = qb_ipcc_sendv_recv(client->ipc, iov, 2, client->buffer, client->buf_size, -1); } while ((qb_rc == -EAGAIN) && crm_ipc_connected(client)); rc = (int) qb_rc; // Negative system errno, or size of reply received } if (rc > 0) { pcmk__ipc_header_t *hdr = (pcmk__ipc_header_t *)(void*)client->buffer; crm_trace("Received %d-byte reply %d to %s IPC %d: %.100s", rc, hdr->qb.id, client->name, header->qb.id, crm_ipc_buffer(client)); if (reply) { *reply = string2xml(crm_ipc_buffer(client)); } } else { crm_trace("No reply to %s IPC %d: rc=%d", client->name, header->qb.id, rc); } send_cleanup: if (crm_ipc_connected(client) == FALSE) { crm_notice("Couldn't send %s IPC request %d: Connection closed " CRM_XS " rc=%d", client->name, header->qb.id, rc); } else if (rc == -ETIMEDOUT) { crm_warn("%s IPC request %d failed: %s after %dms " CRM_XS " rc=%d", client->name, header->qb.id, pcmk_strerror(rc), ms_timeout, rc); crm_write_blackbox(0, NULL); } else if (rc <= 0) { crm_warn("%s IPC request %d failed: %s " CRM_XS " rc=%d", client->name, header->qb.id, ((rc == 0)? 
"No bytes sent" : pcmk_strerror(rc)), rc); } pcmk_free_ipc_event(iov); return rc; } int pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { int ret = 0; pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; #if defined(US_AUTH_PEERCRED_UCRED) struct ucred ucred; socklen_t ucred_len = sizeof(ucred); #endif #ifdef HAVE_QB_IPCC_AUTH_GET if (qb_ipc && !qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid)) { goto do_checks; } #endif #if defined(US_AUTH_PEERCRED_UCRED) if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len) && ucred_len == sizeof(ucred)) { found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid; #elif defined(US_AUTH_PEERCRED_SOCKPEERCRED) struct sockpeercred sockpeercred; socklen_t sockpeercred_len = sizeof(sockpeercred); if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &sockpeercred, &sockpeercred_len) && sockpeercred_len == sizeof(sockpeercred_len)) { found_pid = sockpeercred.pid; found_uid = sockpeercred.uid; found_gid = sockpeercred.gid; #elif defined(US_AUTH_GETPEEREID) if (!getpeereid(sock, &found_uid, &found_gid)) { found_pid = PCMK__SPECIAL_PID; /* cannot obtain PID (FreeBSD) */ #elif defined(US_AUTH_GETPEERUCRED) ucred_t *ucred; if (!getpeerucred(sock, &ucred)) { errno = 0; found_pid = ucred_getpid(ucred); found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred); ret = -errno; ucred_free(ucred); if (ret) { return (ret < 0) ? ret : -pcmk_err_generic; } #else # error "No way to authenticate a Unix socket peer" errno = 0; if (0) { #endif #ifdef HAVE_QB_IPCC_AUTH_GET do_checks: #endif if (gotpid != NULL) { *gotpid = found_pid; } if (gotuid != NULL) { *gotuid = found_uid; } if (gotgid != NULL) { *gotgid = found_gid; } if (found_uid == 0 || found_uid == refuid || found_gid == refgid) { ret = 0; } else { ret = pcmk_rc_ipc_unauthorized; } } else { ret = (errno > 0) ? 
errno : pcmk_rc_error; } return ret; } int crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { int ret = pcmk__crm_ipc_is_authentic_process(NULL, sock, refuid, refgid, gotpid, gotuid, gotgid); /* The old function had some very odd return codes*/ if (ret == 0) { return 1; } else if (ret == pcmk_rc_ipc_unauthorized) { return 0; } else { return pcmk_rc2legacy(ret); } } int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, gid_t refgid, pid_t *gotpid) { static char last_asked_name[PATH_MAX / 2] = ""; /* log spam prevention */ int fd; int rc = pcmk_rc_ipc_unresponsive; int auth_rc = 0; int32_t qb_rc; pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; qb_ipcc_connection_t *c; c = qb_ipcc_connect(name, 0); if (c == NULL) { crm_info("Could not connect to %s IPC: %s", name, strerror(errno)); rc = pcmk_rc_ipc_unresponsive; goto bail; } qb_rc = qb_ipcc_fd_get(c, &fd); if (qb_rc != 0) { rc = (int) -qb_rc; // System errno crm_err("Could not get fd from %s IPC: %s " CRM_XS " rc=%d", name, pcmk_rc_str(rc), rc); goto bail; } auth_rc = pcmk__crm_ipc_is_authentic_process(c, fd, refuid, refgid, &found_pid, &found_uid, &found_gid); if (auth_rc == pcmk_rc_ipc_unauthorized) { crm_err("Daemon (IPC %s) effectively blocked with unauthorized" " process %lld (uid: %lld, gid: %lld)", name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), (long long) found_uid, (long long) found_gid); rc = pcmk_rc_ipc_unauthorized; goto bail; } if (auth_rc != pcmk_rc_ok) { rc = auth_rc; crm_err("Could not get peer credentials from %s IPC: %s " CRM_XS " rc=%d", name, pcmk_rc_str(rc), rc); goto bail; } if (gotpid != NULL) { *gotpid = found_pid; } rc = pcmk_rc_ok; if ((found_uid != refuid || found_gid != refgid) && strncmp(last_asked_name, name, sizeof(last_asked_name))) { if ((found_uid == 0) && (refuid != 0)) { crm_warn("Daemon (IPC %s) runs as root, whereas the expected" " credentials are %lld:%lld, hazard of violating" " the least privilege principle", name, (long long) refuid, (long long) refgid); } else { crm_notice("Daemon (IPC %s) runs as %lld:%lld, whereas the" " expected credentials are %lld:%lld, which may" " mean a different set of privileges than expected", name, (long long) found_uid, (long long) found_gid, (long long) refuid, (long long) refgid); } memccpy(last_asked_name, name, '\0', sizeof(last_asked_name)); } bail: if (c != NULL) { qb_ipcc_disconnect(c); } return rc; } diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c index cf9c99ef2a..2f00e314cd 100644 --- a/lib/common/mainloop.c +++ b/lib/common/mainloop.c @@ -1,1458 +1,1455 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include struct mainloop_child_s { pid_t pid; char *desc; unsigned timerid; gboolean timeout; void *privatedata; enum mainloop_child_flags flags; /* Called when a process dies */ void (*callback) (mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode); }; struct trigger_s { GSource source; gboolean running; gboolean trigger; void *user_data; guint id; }; static gboolean crm_trigger_prepare(GSource * source, gint * timeout) { crm_trigger_t *trig = (crm_trigger_t *) source; /* cluster-glue's FD and IPC related sources make use of * g_source_add_poll() but do not set a timeout in their prepare * functions * * This means mainloop's poll() will block until an event for one * of these sources occurs - any /other/ type of source, such as * this one or g_idle_*, that doesn't use g_source_add_poll() is * S-O-L and won't be processed until there is something fd-based * happens. * * Luckily the timeout we can set here affects all sources and * puts an upper limit on how long poll() can take. * * So unconditionally set a small-ish timeout, not too small that * we're in constant motion, which will act as an upper bound on * how long the signal handling might be delayed for. */ *timeout = 500; /* Timeout in ms */ return trig->trigger; } static gboolean crm_trigger_check(GSource * source) { crm_trigger_t *trig = (crm_trigger_t *) source; return trig->trigger; } static gboolean crm_trigger_dispatch(GSource * source, GSourceFunc callback, gpointer userdata) { int rc = TRUE; crm_trigger_t *trig = (crm_trigger_t *) source; if (trig->running) { /* Wait until the existing job is complete before starting the next one */ return TRUE; } trig->trigger = FALSE; if (callback) { rc = callback(trig->user_data); if (rc < 0) { crm_trace("Trigger handler %p not yet complete", trig); trig->running = TRUE; rc = TRUE; } } return rc; } static void crm_trigger_finalize(GSource * source) { crm_trace("Trigger %p destroyed", source); } static GSourceFuncs crm_trigger_funcs = { crm_trigger_prepare, crm_trigger_check, crm_trigger_dispatch, crm_trigger_finalize, }; static crm_trigger_t * mainloop_setup_trigger(GSource * source, int priority, int (*dispatch) (gpointer user_data), gpointer userdata) { crm_trigger_t *trigger = NULL; trigger = (crm_trigger_t *) source; trigger->id = 0; trigger->trigger = FALSE; trigger->user_data = userdata; if (dispatch) { g_source_set_callback(source, dispatch, trigger, NULL); } g_source_set_priority(source, priority); g_source_set_can_recurse(source, FALSE); trigger->id = g_source_attach(source, NULL); return trigger; } void mainloop_trigger_complete(crm_trigger_t * trig) { crm_trace("Trigger handler %p complete", trig); trig->running = FALSE; } /* If dispatch returns: * -1: Job running but not complete * 0: Remove the trigger from mainloop * 1: Leave the trigger in mainloop */ crm_trigger_t * mainloop_add_trigger(int priority, int (*dispatch) (gpointer user_data), gpointer userdata) { GSource *source = NULL; CRM_ASSERT(sizeof(crm_trigger_t) > sizeof(GSource)); source = g_source_new(&crm_trigger_funcs, sizeof(crm_trigger_t)); CRM_ASSERT(source != NULL); return mainloop_setup_trigger(source, priority, dispatch, userdata); } void mainloop_set_trigger(crm_trigger_t * source) { if(source) { source->trigger = TRUE; } } gboolean mainloop_destroy_trigger(crm_trigger_t * source) { GSource *gs = NULL; if(source == NULL) { return TRUE; } gs = (GSource 
*)source; g_source_destroy(gs); /* Remove from mainloop, ref_count-- */ g_source_unref(gs); /* The caller no longer carries a reference to source * * At this point the source should be free'd, * unless we're currently processing said * source, in which case mainloop holds an * additional reference and it will be free'd * once our processing completes */ return TRUE; } // Define a custom glib source for signal handling // Data structure for custom glib source typedef struct signal_s { crm_trigger_t trigger; // trigger that invoked source (must be first) void (*handler) (int sig); // signal handler int signal; // signal that was received } crm_signal_t; // Table to associate signal handlers with signal numbers static crm_signal_t *crm_signals[NSIG]; /*! * \internal * \brief Dispatch an event from custom glib source for signals * * Given an signal event, clear the event trigger and call any registered * signal handler. * * \param[in] source glib source that triggered this dispatch * \param[in] callback (ignored) * \param[in] userdata (ignored) */ static gboolean crm_signal_dispatch(GSource * source, GSourceFunc callback, gpointer userdata) { crm_signal_t *sig = (crm_signal_t *) source; if(sig->signal != SIGCHLD) { crm_notice("Caught '%s' signal "CRM_XS" %d (%s handler)", strsignal(sig->signal), sig->signal, (sig->handler? "invoking" : "no")); } sig->trigger.trigger = FALSE; if (sig->handler) { sig->handler(sig->signal); } return TRUE; } /*! * \internal * \brief Handle a signal by setting a trigger for signal source * * \param[in] sig Signal number that was received * * \note This is the true signal handler for the mainloop signal source, and * must be async-safe. */ static void mainloop_signal_handler(int sig) { if (sig > 0 && sig < NSIG && crm_signals[sig] != NULL) { mainloop_set_trigger((crm_trigger_t *) crm_signals[sig]); } } // Functions implementing our custom glib source for signal handling static GSourceFuncs crm_signal_funcs = { crm_trigger_prepare, crm_trigger_check, crm_signal_dispatch, crm_trigger_finalize, }; /*! * \internal * \brief Set a true signal handler * * signal()-like interface to sigaction() * * \param[in] sig Signal number to register handler for * \param[in] dispatch Signal handler * * \return The previous value of the signal handler, or SIG_ERR on error * \note The dispatch function must be async-safe. */ sighandler_t crm_signal_handler(int sig, sighandler_t dispatch) { sigset_t mask; struct sigaction sa; struct sigaction old; if (sigemptyset(&mask) < 0) { crm_err("Could not set handler for signal %d: %s", sig, pcmk_strerror(errno)); return SIG_ERR; } memset(&sa, 0, sizeof(struct sigaction)); sa.sa_handler = dispatch; sa.sa_flags = SA_RESTART; sa.sa_mask = mask; if (sigaction(sig, &sa, &old) < 0) { crm_err("Could not set handler for signal %d: %s", sig, pcmk_strerror(errno)); return SIG_ERR; } return old.sa_handler; } static void mainloop_destroy_signal_entry(int sig) { crm_signal_t *tmp = crm_signals[sig]; crm_signals[sig] = NULL; crm_trace("Destroying signal %d", sig); mainloop_destroy_trigger((crm_trigger_t *) tmp); } /*! * \internal * \brief Add a signal handler to a mainloop * * \param[in] sig Signal number to handle * \param[in] dispatch Signal handler function * * \note The true signal handler merely sets a mainloop trigger to call this * dispatch function via the mainloop. Therefore, the dispatch function * does not need to be async-safe. 
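/*
 * Sketch of assumed usage (not part of this patch): registering handlers that
 * run from the main loop rather than in true signal context, as described in
 * the note above.  The function names are placeholders.
 */
static void
handle_shutdown_signal(int sig)
{
    // Safe to do normal work here: invoked via the main loop, not async
    crm_info("Caught signal %d, beginning shutdown", sig);
}

static void
setup_signals(void)
{
    mainloop_add_signal(SIGTERM, handle_shutdown_signal);
    mainloop_add_signal(SIGINT, handle_shutdown_signal);
}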
*/ gboolean mainloop_add_signal(int sig, void (*dispatch) (int sig)) { GSource *source = NULL; int priority = G_PRIORITY_HIGH - 1; if (sig == SIGTERM) { /* TERM is higher priority than other signals, * signals are higher priority than other ipc. * Yes, minus: smaller is "higher" */ priority--; } if (sig >= NSIG || sig < 0) { crm_err("Signal %d is out of range", sig); return FALSE; } else if (crm_signals[sig] != NULL && crm_signals[sig]->handler == dispatch) { crm_trace("Signal handler for %d is already installed", sig); return TRUE; } else if (crm_signals[sig] != NULL) { crm_err("Different signal handler for %d is already installed", sig); return FALSE; } CRM_ASSERT(sizeof(crm_signal_t) > sizeof(GSource)); source = g_source_new(&crm_signal_funcs, sizeof(crm_signal_t)); crm_signals[sig] = (crm_signal_t *) mainloop_setup_trigger(source, priority, NULL, NULL); CRM_ASSERT(crm_signals[sig] != NULL); crm_signals[sig]->handler = dispatch; crm_signals[sig]->signal = sig; if (crm_signal_handler(sig, mainloop_signal_handler) == SIG_ERR) { mainloop_destroy_signal_entry(sig); return FALSE; } #if 0 /* If we want signals to interrupt mainloop's poll(), instead of waiting for * the timeout, then we should call siginterrupt() below * * For now, just enforce a low timeout */ if (siginterrupt(sig, 1) < 0) { crm_perror(LOG_INFO, "Could not enable system call interruptions for signal %d", sig); } #endif return TRUE; } gboolean mainloop_destroy_signal(int sig) { if (sig >= NSIG || sig < 0) { crm_err("Signal %d is out of range", sig); return FALSE; } else if (crm_signal_handler(sig, NULL) == SIG_ERR) { crm_perror(LOG_ERR, "Could not uninstall signal handler for signal %d", sig); return FALSE; } else if (crm_signals[sig] == NULL) { return TRUE; } mainloop_destroy_signal_entry(sig); return TRUE; } static qb_array_t *gio_map = NULL; void mainloop_cleanup(void) { if (gio_map) { qb_array_free(gio_map); } for (int sig = 0; sig < NSIG; ++sig) { mainloop_destroy_signal_entry(sig); } } /* * libqb... */ struct gio_to_qb_poll { int32_t is_used; guint source; int32_t events; void *data; qb_ipcs_dispatch_fn_t fn; enum qb_loop_priority p; }; static gboolean gio_read_socket(GIOChannel * gio, GIOCondition condition, gpointer data) { struct gio_to_qb_poll *adaptor = (struct gio_to_qb_poll *)data; gint fd = g_io_channel_unix_get_fd(gio); crm_trace("%p.%d %d", data, fd, condition); /* if this assert get's hit, then there is a race condition between * when we destroy a fd and when mainloop actually gives it up */ CRM_ASSERT(adaptor->is_used > 0); return (adaptor->fn(fd, condition, adaptor->data) == 0); } static void gio_poll_destroy(gpointer data) { struct gio_to_qb_poll *adaptor = (struct gio_to_qb_poll *)data; adaptor->is_used--; CRM_ASSERT(adaptor->is_used >= 0); if (adaptor->is_used == 0) { crm_trace("Marking adaptor %p unused", adaptor); adaptor->source = 0; } } /*! * \internal * \brief Convert libqb's poll priority into GLib's one * * \param[in] prio libqb's poll priority (#QB_LOOP_MED assumed as fallback) * * \return best matching GLib's priority */ static gint conv_prio_libqb2glib(enum qb_loop_priority prio) { gint ret = G_PRIORITY_DEFAULT; switch (prio) { case QB_LOOP_LOW: ret = G_PRIORITY_LOW; break; case QB_LOOP_HIGH: ret = G_PRIORITY_HIGH; break; default: crm_trace("Invalid libqb's loop priority %d, assuming QB_LOOP_MED", prio); /* fall-through */ case QB_LOOP_MED: break; } return ret; } /*! 
* \internal * \brief Convert libqb's poll priority to rate limiting spec * * \param[in] prio libqb's poll priority (#QB_LOOP_MED assumed as fallback) * * \return best matching rate limiting spec */ static enum qb_ipcs_rate_limit conv_libqb_prio2ratelimit(enum qb_loop_priority prio) { /* this is an inversion of what libqb's qb_ipcs_request_rate_limit does */ enum qb_ipcs_rate_limit ret = QB_IPCS_RATE_NORMAL; switch (prio) { case QB_LOOP_LOW: ret = QB_IPCS_RATE_SLOW; break; case QB_LOOP_HIGH: ret = QB_IPCS_RATE_FAST; break; default: crm_trace("Invalid libqb's loop priority %d, assuming QB_LOOP_MED", prio); /* fall-through */ case QB_LOOP_MED: break; } return ret; } static int32_t gio_poll_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts, void *data, qb_ipcs_dispatch_fn_t fn, int32_t add) { struct gio_to_qb_poll *adaptor; GIOChannel *channel; int32_t res = 0; res = qb_array_index(gio_map, fd, (void **)&adaptor); if (res < 0) { crm_err("Array lookup failed for fd=%d: %d", fd, res); return res; } crm_trace("Adding fd=%d to mainloop as adaptor %p", fd, adaptor); if (add && adaptor->source) { crm_err("Adaptor for descriptor %d is still in-use", fd); return -EEXIST; } if (!add && !adaptor->is_used) { crm_err("Adaptor for descriptor %d is not in-use", fd); return -ENOENT; } /* channel is created with ref_count = 1 */ channel = g_io_channel_unix_new(fd); if (!channel) { crm_err("No memory left to add fd=%d", fd); return -ENOMEM; } if (adaptor->source) { g_source_remove(adaptor->source); adaptor->source = 0; } /* Because unlike the poll() API, glib doesn't tell us about HUPs by default */ evts |= (G_IO_HUP | G_IO_NVAL | G_IO_ERR); adaptor->fn = fn; adaptor->events = evts; adaptor->data = data; adaptor->p = p; adaptor->is_used++; adaptor->source = g_io_add_watch_full(channel, conv_prio_libqb2glib(p), evts, gio_read_socket, adaptor, gio_poll_destroy); /* Now that mainloop now holds a reference to channel, * thanks to g_io_add_watch_full(), drop ours from g_io_channel_unix_new(). 
* * This means that channel will be free'd by: * g_main_context_dispatch() * -> g_source_destroy_internal() * -> g_source_callback_unref() * shortly after gio_poll_destroy() completes */ g_io_channel_unref(channel); crm_trace("Added to mainloop with gsource id=%d", adaptor->source); if (adaptor->source > 0) { return 0; } return -EINVAL; } static int32_t gio_poll_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t evts, void *data, qb_ipcs_dispatch_fn_t fn) { return gio_poll_dispatch_update(p, fd, evts, data, fn, QB_TRUE); } static int32_t gio_poll_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t evts, void *data, qb_ipcs_dispatch_fn_t fn) { return gio_poll_dispatch_update(p, fd, evts, data, fn, QB_FALSE); } static int32_t gio_poll_dispatch_del(int32_t fd) { struct gio_to_qb_poll *adaptor; crm_trace("Looking for fd=%d", fd); if (qb_array_index(gio_map, fd, (void **)&adaptor) == 0) { if (adaptor->source) { g_source_remove(adaptor->source); adaptor->source = 0; } } return 0; } struct qb_ipcs_poll_handlers gio_poll_funcs = { .job_add = NULL, .dispatch_add = gio_poll_dispatch_add, .dispatch_mod = gio_poll_dispatch_mod, .dispatch_del = gio_poll_dispatch_del, }; static enum qb_ipc_type pick_ipc_type(enum qb_ipc_type requested) { const char *env = getenv("PCMK_ipc_type"); if (env && strcmp("shared-mem", env) == 0) { return QB_IPC_SHM; } else if (env && strcmp("socket", env) == 0) { return QB_IPC_SOCKET; } else if (env && strcmp("posix", env) == 0) { return QB_IPC_POSIX_MQ; } else if (env && strcmp("sysv", env) == 0) { return QB_IPC_SYSV_MQ; } else if (requested == QB_IPC_NATIVE) { /* We prefer shared memory because the server never blocks on * send. If part of a message fits into the socket, libqb * needs to block until the remainder can be sent also. * Otherwise the client will wait forever for the remaining * bytes. 
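/*
 * Sketch of an assumed caller of the wrapper defined just below (not part of
 * this patch).  The service name is a placeholder, and the libqb handler
 * struct is assumed to be filled in elsewhere.
 */
extern struct qb_ipcs_service_handlers my_ipc_handlers;

qb_ipcs_service_t *server = mainloop_add_ipc_server("my-service",
                                                    QB_IPC_NATIVE,
                                                    &my_ipc_handlers);

if (server == NULL) {
    crm_err("Could not create IPC server");
}
/* ... later, on shutdown ... */
mainloop_del_ipc_server(server);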
*/ return QB_IPC_SHM; } return requested; } qb_ipcs_service_t * mainloop_add_ipc_server(const char *name, enum qb_ipc_type type, struct qb_ipcs_service_handlers *callbacks) { return mainloop_add_ipc_server_with_prio(name, type, callbacks, QB_LOOP_MED); } qb_ipcs_service_t * mainloop_add_ipc_server_with_prio(const char *name, enum qb_ipc_type type, struct qb_ipcs_service_handlers *callbacks, enum qb_loop_priority prio) { int rc = 0; qb_ipcs_service_t *server = NULL; if (gio_map == NULL) { gio_map = qb_array_create_2(64, sizeof(struct gio_to_qb_poll), 1); } server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks); if (server == NULL) { crm_err("Could not create %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc); return NULL; } if (prio != QB_LOOP_MED) { qb_ipcs_request_rate_limit(server, conv_libqb_prio2ratelimit(prio)); } -#ifdef HAVE_IPCS_GET_BUFFER_SIZE /* All clients should use at least ipc_buffer_max as their buffer size */ qb_ipcs_enforce_buffer_size(server, crm_ipc_default_buffer_size()); -#endif - qb_ipcs_poll_handlers_set(server, &gio_poll_funcs); rc = qb_ipcs_run(server); if (rc < 0) { crm_err("Could not start %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc); return NULL; } return server; } void mainloop_del_ipc_server(qb_ipcs_service_t * server) { if (server) { qb_ipcs_destroy(server); } } struct mainloop_io_s { char *name; void *userdata; int fd; guint source; crm_ipc_t *ipc; GIOChannel *channel; int (*dispatch_fn_ipc) (const char *buffer, ssize_t length, gpointer userdata); int (*dispatch_fn_io) (gpointer userdata); void (*destroy_fn) (gpointer userdata); }; static gboolean mainloop_gio_callback(GIOChannel * gio, GIOCondition condition, gpointer data) { gboolean keep = TRUE; mainloop_io_t *client = data; CRM_ASSERT(client->fd == g_io_channel_unix_get_fd(gio)); if (condition & G_IO_IN) { if (client->ipc) { long rc = 0; int max = 10; do { rc = crm_ipc_read(client->ipc); if (rc <= 0) { crm_trace("Message acquisition from %s[%p] failed: %s (%ld)", client->name, client, pcmk_strerror(rc), rc); } else if (client->dispatch_fn_ipc) { const char *buffer = crm_ipc_buffer(client->ipc); crm_trace("New message from %s[%p] = %ld (I/O condition=%d)", client->name, client, rc, condition); if (client->dispatch_fn_ipc(buffer, rc, client->userdata) < 0) { crm_trace("Connection to %s no longer required", client->name); keep = FALSE; } } } while (keep && rc > 0 && --max > 0); } else { crm_trace("New message from %s[%p] %u", client->name, client, condition); if (client->dispatch_fn_io) { if (client->dispatch_fn_io(client->userdata) < 0) { crm_trace("Connection to %s no longer required", client->name); keep = FALSE; } } } } if (client->ipc && crm_ipc_connected(client->ipc) == FALSE) { crm_err("Connection to %s closed " CRM_XS "client=%p condition=%d", client->name, client, condition); keep = FALSE; } else if (condition & (G_IO_HUP | G_IO_NVAL | G_IO_ERR)) { crm_trace("The connection %s[%p] has been closed (I/O condition=%d)", client->name, client, condition); keep = FALSE; } else if ((condition & G_IO_IN) == 0) { /* #define GLIB_SYSDEF_POLLIN =1 #define GLIB_SYSDEF_POLLPRI =2 #define GLIB_SYSDEF_POLLOUT =4 #define GLIB_SYSDEF_POLLERR =8 #define GLIB_SYSDEF_POLLHUP =16 #define GLIB_SYSDEF_POLLNVAL =32 typedef enum { G_IO_IN GLIB_SYSDEF_POLLIN, G_IO_OUT GLIB_SYSDEF_POLLOUT, G_IO_PRI GLIB_SYSDEF_POLLPRI, G_IO_ERR GLIB_SYSDEF_POLLERR, G_IO_HUP GLIB_SYSDEF_POLLHUP, G_IO_NVAL GLIB_SYSDEF_POLLNVAL } GIOCondition; A bitwise combination representing a condition to watch for on an event source. 
G_IO_IN There is data to read. G_IO_OUT Data can be written (without blocking). G_IO_PRI There is urgent data to read. G_IO_ERR Error condition. G_IO_HUP Hung up (the connection has been broken, usually for pipes and sockets). G_IO_NVAL Invalid request. The file descriptor is not open. */ crm_err("Strange condition: %d", condition); } /* keep == FALSE results in mainloop_gio_destroy() being called * just before the source is removed from mainloop */ return keep; } static void mainloop_gio_destroy(gpointer c) { mainloop_io_t *client = c; char *c_name = strdup(client->name); /* client->source is valid but about to be destroyed (ref_count == 0) in gmain.c * client->channel will still have ref_count > 0... should be == 1 */ crm_trace("Destroying client %s[%p]", c_name, c); if (client->ipc) { crm_ipc_close(client->ipc); } if (client->destroy_fn) { void (*destroy_fn) (gpointer userdata) = client->destroy_fn; client->destroy_fn = NULL; destroy_fn(client->userdata); } if (client->ipc) { crm_ipc_t *ipc = client->ipc; client->ipc = NULL; crm_ipc_destroy(ipc); } crm_trace("Destroyed client %s[%p]", c_name, c); free(client->name); client->name = NULL; free(client); free(c_name); } /*! * \brief Connect to IPC and add it as a main loop source * * \param[in] ipc IPC connection to add * \param[in] priority Event source priority to use for connection * \param[in] userdata Data to register with callbacks * \param[in] callbacks Dispatch and destroy callbacks for connection * \param[out] source Newly allocated event source * * \return Standard Pacemaker return code * * \note On failure, the caller is still responsible for ipc. On success, the * caller should call mainloop_del_ipc_client() when source is no longer * needed, which will lead to the disconnection of the IPC later in the * main loop if it is connected. However the IPC disconnects, * mainloop_gio_destroy() will free ipc and source after calling the * destroy callback. 
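 *
 * \note A minimal usage sketch follows (the IPC endpoint name, callbacks,
 *       buffer size and priority are illustrative assumptions, not taken
 *       from this file):
 *
 *           static int my_dispatch(const char *buffer, ssize_t length,
 *                                  gpointer userdata)
 *           {
 *               crm_debug("Received %lld bytes", (long long) length);
 *               return 0;   // a negative return drops the connection
 *           }
 *
 *           static void my_destroy(gpointer userdata)
 *           {
 *               crm_debug("IPC connection closed");
 *           }
 *
 *           struct ipc_client_callbacks callbacks = {
 *               .dispatch = my_dispatch,
 *               .destroy  = my_destroy,
 *           };
 *           mainloop_io_t *source = NULL;
 *           crm_ipc_t *ipc = crm_ipc_new("pacemakerd", 0); // 0: assume default buffer size
 *           int rc = pcmk__add_mainloop_ipc(ipc, G_PRIORITY_DEFAULT, NULL,
 *                                           &callbacks, &source);
 *
 *           if (rc != pcmk_rc_ok) {
 *               crm_ipc_destroy(ipc);   // on failure, caller still owns ipc
 *           }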
*/ int pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, struct ipc_client_callbacks *callbacks, mainloop_io_t **source) { CRM_CHECK((ipc != NULL) && (callbacks != NULL), return EINVAL); if (!crm_ipc_connect(ipc)) { int rc = errno; crm_debug("Connection to %s failed: %d", crm_ipc_name(ipc), errno); return rc; } *source = mainloop_add_fd(crm_ipc_name(ipc), priority, crm_ipc_get_fd(ipc), userdata, NULL); if (*source == NULL) { int rc = errno; crm_ipc_close(ipc); return rc; } (*source)->ipc = ipc; (*source)->destroy_fn = callbacks->destroy; (*source)->dispatch_fn_ipc = callbacks->dispatch; return pcmk_rc_ok; } mainloop_io_t * mainloop_add_ipc_client(const char *name, int priority, size_t max_size, void *userdata, struct ipc_client_callbacks *callbacks) { crm_ipc_t *ipc = crm_ipc_new(name, max_size); mainloop_io_t *source = NULL; int rc = pcmk__add_mainloop_ipc(ipc, priority, userdata, callbacks, &source); if (rc != pcmk_rc_ok) { if (crm_log_level == LOG_STDOUT) { fprintf(stderr, "Connection to %s failed: %s", name, pcmk_rc_str(rc)); } crm_ipc_destroy(ipc); if (rc > 0) { errno = rc; } else { errno = ENOTCONN; } return NULL; } return source; } void mainloop_del_ipc_client(mainloop_io_t * client) { mainloop_del_fd(client); } crm_ipc_t * mainloop_get_ipc_client(mainloop_io_t * client) { if (client) { return client->ipc; } return NULL; } mainloop_io_t * mainloop_add_fd(const char *name, int priority, int fd, void *userdata, struct mainloop_fd_callbacks * callbacks) { mainloop_io_t *client = NULL; if (fd >= 0) { client = calloc(1, sizeof(mainloop_io_t)); if (client == NULL) { return NULL; } client->name = strdup(name); client->userdata = userdata; if (callbacks) { client->destroy_fn = callbacks->destroy; client->dispatch_fn_io = callbacks->dispatch; } client->fd = fd; client->channel = g_io_channel_unix_new(fd); client->source = g_io_add_watch_full(client->channel, priority, (G_IO_IN | G_IO_HUP | G_IO_NVAL | G_IO_ERR), mainloop_gio_callback, client, mainloop_gio_destroy); /* Now that mainloop now holds a reference to channel, * thanks to g_io_add_watch_full(), drop ours from g_io_channel_unix_new(). 
* * This means that channel will be free'd by: * g_main_context_dispatch() or g_source_remove() * -> g_source_destroy_internal() * -> g_source_callback_unref() * shortly after mainloop_gio_destroy() completes */ g_io_channel_unref(client->channel); crm_trace("Added connection %d for %s[%p].%d", client->source, client->name, client, fd); } else { errno = EINVAL; } return client; } void mainloop_del_fd(mainloop_io_t * client) { if (client != NULL) { crm_trace("Removing client %s[%p]", client->name, client); if (client->source) { /* Results in mainloop_gio_destroy() being called just * before the source is removed from mainloop */ g_source_remove(client->source); } } } static GListPtr child_list = NULL; pid_t mainloop_child_pid(mainloop_child_t * child) { return child->pid; } const char * mainloop_child_name(mainloop_child_t * child) { return child->desc; } int mainloop_child_timeout(mainloop_child_t * child) { return child->timeout; } void * mainloop_child_userdata(mainloop_child_t * child) { return child->privatedata; } void mainloop_clear_child_userdata(mainloop_child_t * child) { child->privatedata = NULL; } /* good function name */ static void child_free(mainloop_child_t *child) { if (child->timerid != 0) { crm_trace("Removing timer %d", child->timerid); g_source_remove(child->timerid); child->timerid = 0; } free(child->desc); free(child); } /* terrible function name */ static int child_kill_helper(mainloop_child_t *child) { int rc; if (child->flags & mainloop_leave_pid_group) { crm_debug("Kill pid %d only. leave group intact.", child->pid); rc = kill(child->pid, SIGKILL); } else { crm_debug("Kill pid %d's group", child->pid); rc = kill(-child->pid, SIGKILL); } if (rc < 0) { if (errno != ESRCH) { crm_perror(LOG_ERR, "kill(%d, KILL) failed", child->pid); } return -errno; } return 0; } static gboolean child_timeout_callback(gpointer p) { mainloop_child_t *child = p; int rc = 0; child->timerid = 0; if (child->timeout) { crm_crit("%s process (PID %d) will not die!", child->desc, (int)child->pid); return FALSE; } rc = child_kill_helper(child); if (rc == -ESRCH) { /* Nothing left to do. pid doesn't exist */ return FALSE; } child->timeout = TRUE; crm_warn("%s process (PID %d) timed out", child->desc, (int)child->pid); child->timerid = g_timeout_add(5000, child_timeout_callback, child); return FALSE; } static bool child_waitpid(mainloop_child_t *child, int flags) { int rc = 0; int core = 0; int signo = 0; int status = 0; int exitcode = 0; bool callback_needed = true; rc = waitpid(child->pid, &status, flags); if (rc == 0) { // WNOHANG in flags, and child status is not available crm_trace("Child process %d (%s) still active", child->pid, child->desc); callback_needed = false; } else if (rc != child->pid) { /* According to POSIX, possible conditions: * - child->pid was non-positive (process group or any child), * and rc is specific child * - errno ECHILD (pid does not exist or is not child) * - errno EINVAL (invalid flags) * - errno EINTR (caller interrupted by signal) * * @TODO Handle these cases more specifically. 
*/ signo = SIGCHLD; exitcode = 1; crm_notice("Wait for child process %d (%s) interrupted: %s", child->pid, child->desc, pcmk_strerror(errno)); } else if (WIFEXITED(status)) { exitcode = WEXITSTATUS(status); crm_trace("Child process %d (%s) exited with status %d", child->pid, child->desc, exitcode); } else if (WIFSIGNALED(status)) { signo = WTERMSIG(status); crm_trace("Child process %d (%s) exited with signal %d (%s)", child->pid, child->desc, signo, strsignal(signo)); #ifdef WCOREDUMP // AIX, SunOS, maybe others } else if (WCOREDUMP(status)) { core = 1; crm_err("Child process %d (%s) dumped core", child->pid, child->desc); #endif } else { // flags must contain WUNTRACED and/or WCONTINUED to reach this crm_trace("Child process %d (%s) stopped or continued", child->pid, child->desc); callback_needed = false; } if (callback_needed && child->callback) { child->callback(child, child->pid, core, signo, exitcode); } return callback_needed; } static void child_death_dispatch(int signal) { for (GList *iter = child_list; iter; ) { GList *saved = iter; mainloop_child_t *child = iter->data; iter = iter->next; if (child_waitpid(child, WNOHANG)) { crm_trace("Removing completed process %d from child list", child->pid); child_list = g_list_remove_link(child_list, saved); g_list_free(saved); child_free(child); } } } static gboolean child_signal_init(gpointer p) { crm_trace("Installed SIGCHLD handler"); /* Do NOT use g_child_watch_add() and friends, they rely on pthreads */ mainloop_add_signal(SIGCHLD, child_death_dispatch); /* In case they terminated before the signal handler was installed */ child_death_dispatch(SIGCHLD); return FALSE; } gboolean mainloop_child_kill(pid_t pid) { GListPtr iter; mainloop_child_t *child = NULL; mainloop_child_t *match = NULL; /* It is impossible to block SIGKILL, this allows us to * call waitpid without WNOHANG flag.*/ int waitflags = 0, rc = 0; for (iter = child_list; iter != NULL && match == NULL; iter = iter->next) { child = iter->data; if (pid == child->pid) { match = child; } } if (match == NULL) { return FALSE; } rc = child_kill_helper(match); if(rc == -ESRCH) { /* It's gone, but hasn't shown up in waitpid() yet. Wait until we get * SIGCHLD and let handler clean it up as normal (so we get the correct * return code/status). The blocking alternative would be to call * child_waitpid(match, 0). */ crm_trace("Waiting for signal that child process %d completed", match->pid); return TRUE; } else if(rc != 0) { /* If KILL for some other reason set the WNOHANG flag since we * can't be certain what happened. */ waitflags = WNOHANG; } if (!child_waitpid(match, waitflags)) { /* not much we can do if this occurs */ return FALSE; } child_list = g_list_remove(child_list, match); child_free(match); return TRUE; } /* Create/Log a new tracked process * To track a process group, use -pid * * @TODO Using a non-positive pid (i.e. any child, or process group) would * likely not be useful since we will free the child after the first * completed process. 
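 *
 * Illustrative usage sketch (the callback, description and timeout are
 * hypothetical, not from the original source):
 *
 *     static void on_child_done(mainloop_child_t *p, pid_t pid, int core,
 *                               int signo, int exitcode)
 *     {
 *         crm_info("Child %lld finished: core=%d signal=%d exitcode=%d",
 *                  (long long) pid, core, signo, exitcode);
 *     }
 *
 *     pid_t pid = fork();
 *
 *     if (pid == 0) {            // child: run some helper command
 *         execlp("true", "true", (char *) NULL);
 *         _exit(127);
 *     } else if (pid > 0) {      // parent: track it; SIGKILL its group after 30s
 *         mainloop_child_add(pid, 30000, "example-helper", NULL, on_child_done);
 *     }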
*/ void mainloop_child_add_with_flags(pid_t pid, int timeout, const char *desc, void *privatedata, enum mainloop_child_flags flags, void (*callback) (mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)) { static bool need_init = TRUE; mainloop_child_t *child = g_new(mainloop_child_t, 1); child->pid = pid; child->timerid = 0; child->timeout = FALSE; child->privatedata = privatedata; child->callback = callback; child->flags = flags; if(desc) { child->desc = strdup(desc); } if (timeout) { child->timerid = g_timeout_add(timeout, child_timeout_callback, child); } child_list = g_list_append(child_list, child); if(need_init) { need_init = FALSE; /* SIGCHLD processing has to be invoked from mainloop. * We do not want it to be possible to both add a child pid * to mainloop, and have the pid's exit callback invoked within * the same callstack. */ g_timeout_add(1, child_signal_init, NULL); } } void mainloop_child_add(pid_t pid, int timeout, const char *desc, void *privatedata, void (*callback) (mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)) { mainloop_child_add_with_flags(pid, timeout, desc, privatedata, 0, callback); } struct mainloop_timer_s { guint id; guint period_ms; bool repeat; char *name; GSourceFunc cb; void *userdata; }; static gboolean mainloop_timer_cb(gpointer user_data) { int id = 0; bool repeat = FALSE; struct mainloop_timer_s *t = user_data; CRM_ASSERT(t != NULL); id = t->id; t->id = 0; /* Ensure it's unset during callbacks so that * mainloop_timer_running() works as expected */ if(t->cb) { crm_trace("Invoking callbacks for timer %s", t->name); repeat = t->repeat; if(t->cb(t->userdata) == FALSE) { crm_trace("Timer %s complete", t->name); repeat = FALSE; } } if(repeat) { /* Restore if repeating */ t->id = id; } return repeat; } bool mainloop_timer_running(mainloop_timer_t *t) { if(t && t->id != 0) { return TRUE; } return FALSE; } void mainloop_timer_start(mainloop_timer_t *t) { mainloop_timer_stop(t); if(t && t->period_ms > 0) { crm_trace("Starting timer %s", t->name); t->id = g_timeout_add(t->period_ms, mainloop_timer_cb, t); } } void mainloop_timer_stop(mainloop_timer_t *t) { if(t && t->id != 0) { crm_trace("Stopping timer %s", t->name); g_source_remove(t->id); t->id = 0; } } guint mainloop_timer_set_period(mainloop_timer_t *t, guint period_ms) { guint last = 0; if(t) { last = t->period_ms; t->period_ms = period_ms; } if(t && t->id != 0 && last != t->period_ms) { mainloop_timer_start(t); } return last; } mainloop_timer_t * mainloop_timer_add(const char *name, guint period_ms, bool repeat, GSourceFunc cb, void *userdata) { mainloop_timer_t *t = calloc(1, sizeof(mainloop_timer_t)); if(t) { if(name) { t->name = crm_strdup_printf("%s-%u-%d", name, period_ms, repeat); } else { t->name = crm_strdup_printf("%p-%u-%d", t, period_ms, repeat); } t->id = 0; t->period_ms = period_ms; t->repeat = repeat; t->cb = cb; t->userdata = userdata; crm_trace("Created timer %s with %p %p", t->name, userdata, t->userdata); } return t; } void mainloop_timer_del(mainloop_timer_t *t) { if(t) { crm_trace("Destroying timer %s", t->name); mainloop_timer_stop(t); free(t->name); free(t); } } /* * Helpers to make sure certain events aren't lost at shutdown */ static gboolean drain_timeout_cb(gpointer user_data) { bool *timeout_popped = (bool*) user_data; *timeout_popped = TRUE; return FALSE; } /*! 
* \brief Drain some remaining main loop events then quit it * * \param[in] mloop Main loop to drain and quit * \param[in] n Drain up to this many pending events */ void pcmk_quit_main_loop(GMainLoop *mloop, unsigned int n) { if ((mloop != NULL) && g_main_loop_is_running(mloop)) { GMainContext *ctx = g_main_loop_get_context(mloop); /* Drain up to n events in case some memory clean-up is pending * (helpful to reduce noise in valgrind output). */ for (int i = 0; (i < n) && g_main_context_pending(ctx); ++i) { g_main_context_dispatch(ctx); } g_main_loop_quit(mloop); } } /*! * \brief Process main loop events while a certain condition is met * * \param[in] mloop Main loop to process * \param[in] timer_ms Don't process longer than this amount of time * \param[in] check Function that returns TRUE if events should be processed * * \note This function is intended to be called at shutdown if certain important * events should not be missed. The caller would likely quit the main loop * or exit after calling this function. The check() function will be * passed the remaining timeout in milliseconds. */ void pcmk_drain_main_loop(GMainLoop *mloop, guint timer_ms, bool (*check)(guint)) { bool timeout_popped = FALSE; guint timer = 0; GMainContext *ctx = NULL; CRM_CHECK(mloop && check, return); ctx = g_main_loop_get_context(mloop); if (ctx) { time_t start_time = time(NULL); timer = g_timeout_add(timer_ms, drain_timeout_cb, &timeout_popped); while (!timeout_popped && check(timer_ms - (time(NULL) - start_time) * 1000)) { g_main_context_iteration(ctx, TRUE); } } if (!timeout_popped && (timer > 0)) { g_source_remove(timer); } } // Deprecated functions kept only for backward API compatibility gboolean crm_signal(int sig, void (*dispatch) (int sig)); /* * \brief Use crm_signal_handler() instead * \deprecated */ gboolean crm_signal(int sig, void (*dispatch) (int sig)) { return crm_signal_handler(sig, dispatch) != SIG_ERR; } diff --git a/lib/common/remote.c b/lib/common/remote.c index 4f2c2f08f6..cee6f59670 100644 --- a/lib/common/remote.c +++ b/lib/common/remote.c @@ -1,1282 +1,1278 @@ /* * Copyright 2008-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* X32T ~ PRIx32 */ #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # include #endif /* Swab macros from linux/swab.h */ #ifdef HAVE_LINUX_SWAB_H # include #else /* * casts are necessary for constants, because we never know how for sure * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
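 *
 * Illustrative only: with the fallback macros below,
 *
 *     __swab16(0x1234)     == 0x3412
 *     __swab32(0xBADADBBD) == 0xBDDBDABA   // byte-swapped ENDIAN_LOCAL marker
 *
 * which is how localized_remote_header() below recognizes (and corrects)
 * a message header written by a peer with the opposite byte order.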
*/ #define __swab16(x) ((uint16_t)( \ (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8))) #define __swab32(x) ((uint32_t)( \ (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24))) #define __swab64(x) ((uint64_t)( \ (((uint64_t)(x) & (uint64_t)0x00000000000000ffULL) << 56) | \ (((uint64_t)(x) & (uint64_t)0x000000000000ff00ULL) << 40) | \ (((uint64_t)(x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \ (((uint64_t)(x) & (uint64_t)0x00000000ff000000ULL) << 8) | \ (((uint64_t)(x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \ (((uint64_t)(x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \ (((uint64_t)(x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \ (((uint64_t)(x) & (uint64_t)0xff00000000000000ULL) >> 56))) #endif #define REMOTE_MSG_VERSION 1 #define ENDIAN_LOCAL 0xBADADBBD struct remote_header_v0 { uint32_t endian; /* Detect messages from hosts with different endian-ness */ uint32_t version; uint64_t id; uint64_t flags; uint32_t size_total; uint32_t payload_offset; uint32_t payload_compressed; uint32_t payload_uncompressed; /* New fields get added here */ } __attribute__ ((packed)); /*! * \internal * \brief Retrieve remote message header, in local endianness * * Return a pointer to the header portion of a remote connection's message * buffer, converting the header to local endianness if needed. * * \param[in,out] remote Remote connection with new message * * \return Pointer to message header, localized if necessary */ static struct remote_header_v0 * localized_remote_header(pcmk__remote_t *remote) { struct remote_header_v0 *header = (struct remote_header_v0 *)remote->buffer; if(remote->buffer_offset < sizeof(struct remote_header_v0)) { return NULL; } else if(header->endian != ENDIAN_LOCAL) { uint32_t endian = __swab32(header->endian); CRM_LOG_ASSERT(endian == ENDIAN_LOCAL); if(endian != ENDIAN_LOCAL) { crm_err("Invalid message detected, endian mismatch: %" X32T " is neither %" X32T " nor the swab'd %" X32T, ENDIAN_LOCAL, header->endian, endian); return NULL; } header->id = __swab64(header->id); header->flags = __swab64(header->flags); header->endian = __swab32(header->endian); header->version = __swab32(header->version); header->size_total = __swab32(header->size_total); header->payload_offset = __swab32(header->payload_offset); header->payload_compressed = __swab32(header->payload_compressed); header->payload_uncompressed = __swab32(header->payload_uncompressed); } return header; } #ifdef HAVE_GNUTLS_GNUTLS_H int pcmk__tls_client_handshake(pcmk__remote_t *remote, int timeout_ms) { int rc = 0; int pollrc = 0; time_t time_limit = time(NULL) + timeout_ms / 1000; do { rc = gnutls_handshake(*remote->tls_session); if ((rc == GNUTLS_E_INTERRUPTED) || (rc == GNUTLS_E_AGAIN)) { pollrc = pcmk__remote_ready(remote, 1000); if ((pollrc != pcmk_rc_ok) && (pollrc != ETIME)) { /* poll returned error, there is no hope */ crm_trace("TLS handshake poll failed: %s (%d)", pcmk_strerror(pollrc), pollrc); return pcmk_legacy2rc(pollrc); } } else if (rc < 0) { crm_trace("TLS handshake failed: %s (%d)", gnutls_strerror(rc), rc); return EPROTO; } else { return pcmk_rc_ok; } } while (time(NULL) < time_limit); return ETIME; } /*! 
* \internal * \brief Set minimum prime size required by TLS client * * \param[in] session TLS session to affect */ static void set_minimum_dh_bits(gnutls_session_t *session) { const char *dh_min_bits_s = getenv("PCMK_dh_min_bits"); if (dh_min_bits_s) { int dh_min_bits = crm_parse_int(dh_min_bits_s, "0"); /* This function is deprecated since GnuTLS 3.1.7, in favor of letting * the priority string imply the DH requirements, but this is the only * way to give the user control over compatibility with older servers. */ if (dh_min_bits > 0) { crm_info("Requiring server use a Diffie-Hellman prime of at least %d bits", dh_min_bits); gnutls_dh_set_prime_bits(*session, dh_min_bits); } } } static unsigned int get_bound_dh_bits(unsigned int dh_bits) { const char *dh_min_bits_s = getenv("PCMK_dh_min_bits"); const char *dh_max_bits_s = getenv("PCMK_dh_max_bits"); int dh_min_bits = 0; int dh_max_bits = 0; if (dh_min_bits_s) { dh_min_bits = crm_parse_int(dh_min_bits_s, "0"); } if (dh_max_bits_s) { dh_max_bits = crm_parse_int(dh_max_bits_s, "0"); if ((dh_min_bits > 0) && (dh_max_bits > 0) && (dh_max_bits < dh_min_bits)) { crm_warn("Ignoring PCMK_dh_max_bits because it is less than PCMK_dh_min_bits"); dh_max_bits = 0; } } if ((dh_min_bits > 0) && (dh_bits < dh_min_bits)) { return dh_min_bits; } if ((dh_max_bits > 0) && (dh_bits > dh_max_bits)) { return dh_max_bits; } return dh_bits; } /*! * \internal * \brief Initialize a new TLS session * * \param[in] csock Connected socket for TLS session * \param[in] conn_type GNUTLS_SERVER or GNUTLS_CLIENT * \param[in] cred_type GNUTLS_CRD_ANON or GNUTLS_CRD_PSK * \param[in] credentials TLS session credentials * * \return Pointer to newly created session object, or NULL on error */ gnutls_session_t * pcmk__new_tls_session(int csock, unsigned int conn_type, gnutls_credentials_type_t cred_type, void *credentials) { int rc = GNUTLS_E_SUCCESS; const char *prio_base = NULL; char *prio = NULL; gnutls_session_t *session = NULL; /* Determine list of acceptable ciphers, etc. Pacemaker always adds the * values required for its functionality. * * For an example of anonymous authentication, see: * http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication */ prio_base = getenv("PCMK_tls_priorities"); if (prio_base == NULL) { prio_base = PCMK_GNUTLS_PRIORITIES; } prio = crm_strdup_printf("%s:%s", prio_base, (cred_type == GNUTLS_CRD_ANON)? "+ANON-DH" : "+DHE-PSK:+PSK"); session = gnutls_malloc(sizeof(gnutls_session_t)); if (session == NULL) { rc = GNUTLS_E_MEMORY_ERROR; goto error; } rc = gnutls_init(session, conn_type); if (rc != GNUTLS_E_SUCCESS) { goto error; } /* @TODO On the server side, it would be more efficient to cache the * priority with gnutls_priority_init2() and set it with * gnutls_priority_set() for all sessions. */ rc = gnutls_priority_set_direct(*session, prio, NULL); if (rc != GNUTLS_E_SUCCESS) { goto error; } if (conn_type == GNUTLS_CLIENT) { set_minimum_dh_bits(session); } gnutls_transport_set_ptr(*session, (gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); rc = gnutls_credentials_set(*session, cred_type, credentials); if (rc != GNUTLS_E_SUCCESS) { goto error; } free(prio); return session; error: crm_err("Could not initialize %s TLS %s session: %s " CRM_XS " rc=%d priority='%s'", (cred_type == GNUTLS_CRD_ANON)? "anonymous" : "PSK", (conn_type == GNUTLS_SERVER)? "server" : "client", gnutls_strerror(rc), rc, prio); free(prio); if (session != NULL) { gnutls_free(session); } return NULL; } /*! 
* \internal * \brief Initialize Diffie-Hellman parameters for a TLS server * * \param[out] dh_params Parameter object to initialize * * \return Standard Pacemaker return code * \todo The current best practice is to allow the client and server to * negotiate the Diffie-Hellman parameters via a TLS extension (RFC 7919). * However, we have to support both older versions of GnuTLS (<3.6) that * don't support the extension on our side, and older Pacemaker versions * that don't support the extension on the other side. The next best * practice would be to use a known good prime (see RFC 5114 section 2.2), * possibly stored in a file distributed with Pacemaker. */ int pcmk__init_tls_dh(gnutls_dh_params_t *dh_params) { int rc = GNUTLS_E_SUCCESS; unsigned int dh_bits = 0; rc = gnutls_dh_params_init(dh_params); if (rc != GNUTLS_E_SUCCESS) { goto error; } -#ifdef HAVE_GNUTLS_SEC_PARAM_TO_PK_BITS dh_bits = gnutls_sec_param_to_pk_bits(GNUTLS_PK_DH, GNUTLS_SEC_PARAM_NORMAL); if (dh_bits == 0) { rc = GNUTLS_E_DH_PRIME_UNACCEPTABLE; goto error; } -#else - dh_bits = 1024; -#endif dh_bits = get_bound_dh_bits(dh_bits); crm_info("Generating Diffie-Hellman parameters with %u-bit prime for TLS", dh_bits); rc = gnutls_dh_params_generate2(*dh_params, dh_bits); if (rc != GNUTLS_E_SUCCESS) { goto error; } return pcmk_rc_ok; error: crm_err("Could not initialize Diffie-Hellman parameters for TLS: %s " CRM_XS " rc=%d", gnutls_strerror(rc), rc); return EPROTO; } /*! * \internal * \brief Process handshake data from TLS client * * Read as much TLS handshake data as is available. * * \param[in] client Client connection * * \return Standard Pacemaker return code (of particular interest, EAGAIN * if some data was successfully read but more data is needed) */ int pcmk__read_handshake_data(pcmk__client_t *client) { int rc = 0; CRM_ASSERT(client && client->remote && client->remote->tls_session); do { rc = gnutls_handshake(*client->remote->tls_session); } while (rc == GNUTLS_E_INTERRUPTED); if (rc == GNUTLS_E_AGAIN) { /* No more data is available at the moment. This function should be * invoked again once the client sends more. 
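 *
 * An illustrative caller-side sketch (assumption: in the real daemons this
 * retry is driven by the connection's mainloop dispatch rather than a
 * literal loop):
 *
 *     int rc = pcmk__read_handshake_data(client);
 *
 *     while (rc == EAGAIN) {
 *         // wait until the client's socket is readable again, then retry
 *         rc = pcmk__read_handshake_data(client);
 *     }
 *     if (rc != pcmk_rc_ok) {
 *         // EPROTO: handshake failed, so disconnect the client
 *     }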
*/ return EAGAIN; } else if (rc != GNUTLS_E_SUCCESS) { crm_err("TLS handshake with remote client failed: %s " CRM_XS " rc=%d", gnutls_strerror(rc), rc); return EPROTO; } return pcmk_rc_ok; } // \return Standard Pacemaker return code static int send_tls(gnutls_session_t *session, struct iovec *iov) { const char *unsent = iov->iov_base; size_t unsent_len = iov->iov_len; ssize_t gnutls_rc; if (unsent == NULL) { return EINVAL; } crm_trace("Sending TLS message of %llu bytes", (unsigned long long) unsent_len); while (true) { gnutls_rc = gnutls_record_send(*session, unsent, unsent_len); if (gnutls_rc == GNUTLS_E_INTERRUPTED || gnutls_rc == GNUTLS_E_AGAIN) { crm_trace("Retrying to send %llu bytes remaining", (unsigned long long) unsent_len); } else if (gnutls_rc < 0) { // Caller can log as error if necessary crm_info("TLS connection terminated: %s " CRM_XS " rc=%lld", gnutls_strerror((int) gnutls_rc), (long long) gnutls_rc); return ECONNABORTED; } else if (gnutls_rc < unsent_len) { crm_trace("Sent %lld of %llu bytes remaining", (long long) gnutls_rc, (unsigned long long) unsent_len); unsent_len -= gnutls_rc; unsent += gnutls_rc; } else { crm_trace("Sent all %lld bytes remaining", (long long) gnutls_rc); break; } } return pcmk_rc_ok; } #endif // \return Standard Pacemaker return code static int send_plaintext(int sock, struct iovec *iov) { const char *unsent = iov->iov_base; size_t unsent_len = iov->iov_len; ssize_t write_rc; if (unsent == NULL) { return EINVAL; } crm_debug("Sending plaintext message of %llu bytes to socket %d", (unsigned long long) unsent_len, sock); while (true) { write_rc = write(sock, unsent, unsent_len); if (write_rc < 0) { int rc = errno; if ((errno == EINTR) || (errno == EAGAIN)) { crm_trace("Retrying to send %llu bytes remaining to socket %d", (unsigned long long) unsent_len, sock); continue; } // Caller can log as error if necessary crm_info("Could not send message: %s " CRM_XS " rc=%d socket=%d", pcmk_rc_str(rc), rc, sock); return rc; } else if (write_rc < unsent_len) { crm_trace("Sent %lld of %llu bytes remaining", (long long) write_rc, (unsigned long long) unsent_len); unsent += write_rc; unsent_len -= write_rc; continue; } else { crm_trace("Sent all %lld bytes remaining: %.100s", (long long) write_rc, (char *) (iov->iov_base)); break; } } return pcmk_rc_ok; } // \return Standard Pacemaker return code static int remote_send_iovs(pcmk__remote_t *remote, struct iovec *iov, int iovs) { int rc = pcmk_rc_ok; for (int lpc = 0; (lpc < iovs) && (rc == pcmk_rc_ok); lpc++) { #ifdef HAVE_GNUTLS_GNUTLS_H if (remote->tls_session) { rc = send_tls(remote->tls_session, &(iov[lpc])); continue; } #endif if (remote->tcp_socket) { rc = send_plaintext(remote->tcp_socket, &(iov[lpc])); } else { rc = ESOCKTNOSUPPORT; } } return rc; } /*! 
* \internal * \brief Send an XML message over a Pacemaker Remote connection * * \param[in] remote Pacemaker Remote connection to use * \param[in] msg XML to send * * \return Standard Pacemaker return code */ int pcmk__remote_send_xml(pcmk__remote_t *remote, xmlNode *msg) { int rc = pcmk_rc_ok; static uint64_t id = 0; char *xml_text = NULL; struct iovec iov[2]; struct remote_header_v0 *header; CRM_CHECK((remote != NULL) && (msg != NULL), return EINVAL); xml_text = dump_xml_unformatted(msg); CRM_CHECK(xml_text != NULL, return EINVAL); header = calloc(1, sizeof(struct remote_header_v0)); CRM_ASSERT(header != NULL); iov[0].iov_base = header; iov[0].iov_len = sizeof(struct remote_header_v0); iov[1].iov_base = xml_text; iov[1].iov_len = 1 + strlen(xml_text); id++; header->id = id; header->endian = ENDIAN_LOCAL; header->version = REMOTE_MSG_VERSION; header->payload_offset = iov[0].iov_len; header->payload_uncompressed = iov[1].iov_len; header->size_total = iov[0].iov_len + iov[1].iov_len; rc = remote_send_iovs(remote, iov, 2); if (rc != pcmk_rc_ok) { crm_err("Could not send remote message: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); } free(iov[0].iov_base); free(iov[1].iov_base); return rc; } /*! * \internal * \brief Obtain the XML from the currently buffered remote connection message * * \param[in] remote Remote connection possibly with message available * * \return Newly allocated XML object corresponding to message data, or NULL * \note This effectively removes the message from the connection buffer. */ xmlNode * pcmk__remote_message_xml(pcmk__remote_t *remote) { xmlNode *xml = NULL; struct remote_header_v0 *header = localized_remote_header(remote); if (header == NULL) { return NULL; } /* Support compression on the receiving end now, in case we ever want to add it later */ if (header->payload_compressed) { int rc = 0; unsigned int size_u = 1 + header->payload_uncompressed; char *uncompressed = calloc(1, header->payload_offset + size_u); crm_trace("Decompressing message data %d bytes into %d bytes", header->payload_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed + header->payload_offset, &size_u, remote->buffer + header->payload_offset, header->payload_compressed, 1, 0); if (rc != BZ_OK && header->version > REMOTE_MSG_VERSION) { crm_warn("Couldn't decompress v%d message, we only understand v%d", header->version, REMOTE_MSG_VERSION); free(uncompressed); return NULL; } else if (rc != BZ_OK) { crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", bz2_strerror(rc), rc); free(uncompressed); return NULL; } CRM_ASSERT(size_u == header->payload_uncompressed); memcpy(uncompressed, remote->buffer, header->payload_offset); /* Preserve the header */ remote->buffer_size = header->payload_offset + size_u; free(remote->buffer); remote->buffer = uncompressed; header = localized_remote_header(remote); } /* take ownership of the buffer */ remote->buffer_offset = 0; CRM_LOG_ASSERT(remote->buffer[sizeof(struct remote_header_v0) + header->payload_uncompressed - 1] == 0); xml = string2xml(remote->buffer + header->payload_offset); if (xml == NULL && header->version > REMOTE_MSG_VERSION) { crm_warn("Couldn't parse v%d message, we only understand v%d", header->version, REMOTE_MSG_VERSION); } else if (xml == NULL) { crm_err("Couldn't parse: '%.120s'", remote->buffer + header->payload_offset); } return xml; } static int get_remote_socket(pcmk__remote_t *remote) { #ifdef HAVE_GNUTLS_GNUTLS_H if (remote->tls_session) { void *sock_ptr = gnutls_transport_get_ptr(*remote->tls_session); return 
GPOINTER_TO_INT(sock_ptr); } #endif if (remote->tcp_socket) { return remote->tcp_socket; } crm_err("Remote connection type undetermined (bug?)"); return -1; } /*! * \internal * \brief Wait for a remote session to have data to read * * \param[in] remote Connection to check * \param[in] timeout_ms Maximum time (in ms) to wait * * \return Standard Pacemaker return code (of particular interest, pcmk_rc_ok if * there is data ready to be read, and ETIME if there is no data within * the specified timeout) */ int pcmk__remote_ready(pcmk__remote_t *remote, int timeout_ms) { struct pollfd fds = { 0, }; int sock = 0; int rc = 0; time_t start; int timeout = timeout_ms; sock = get_remote_socket(remote); if (sock <= 0) { crm_trace("No longer connected"); return ENOTCONN; } start = time(NULL); errno = 0; do { fds.fd = sock; fds.events = POLLIN; /* If we got an EINTR while polling, and we have a * specific timeout we are trying to honor, attempt * to adjust the timeout to the closest second. */ if (errno == EINTR && (timeout > 0)) { timeout = timeout_ms - ((time(NULL) - start) * 1000); if (timeout < 1000) { timeout = 1000; } } rc = poll(&fds, 1, timeout); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; } return (rc == 0)? ETIME : pcmk_rc_ok; } /*! * \internal * \brief Read bytes from non-blocking remote connection * * \param[in] remote Remote connection to read * * \return Standard Pacemaker return code (of particular interest, pcmk_rc_ok if * a full message has been received, or EAGAIN for a partial message) * \note Use only with non-blocking sockets after polling the socket. * \note This function will return when the socket read buffer is empty or an * error is encountered. */ static int read_available_remote_data(pcmk__remote_t *remote) { int rc = pcmk_rc_ok; size_t read_len = sizeof(struct remote_header_v0); struct remote_header_v0 *header = localized_remote_header(remote); bool received = false; ssize_t read_rc; if(header) { /* Stop at the end of the current message */ read_len = header->size_total; } /* automatically grow the buffer when needed */ if(remote->buffer_size < read_len) { remote->buffer_size = 2 * read_len; crm_trace("Expanding buffer to %llu bytes", (unsigned long long) remote->buffer_size); remote->buffer = pcmk__realloc(remote->buffer, remote->buffer_size + 1); } #ifdef HAVE_GNUTLS_GNUTLS_H if (!received && remote->tls_session) { read_rc = gnutls_record_recv(*(remote->tls_session), remote->buffer + remote->buffer_offset, remote->buffer_size - remote->buffer_offset); if (read_rc == GNUTLS_E_INTERRUPTED) { rc = EINTR; } else if (read_rc == GNUTLS_E_AGAIN) { rc = EAGAIN; } else if (read_rc < 0) { crm_debug("TLS receive failed: %s (%lld)", gnutls_strerror(read_rc), (long long) read_rc); rc = EIO; } received = true; } #endif if (!received && remote->tcp_socket) { read_rc = read(remote->tcp_socket, remote->buffer + remote->buffer_offset, remote->buffer_size - remote->buffer_offset); if (read_rc < 0) { rc = errno; } received = true; } if (!received) { crm_err("Remote connection type undetermined (bug?)"); return ESOCKTNOSUPPORT; } /* process any errors. */ if (read_rc > 0) { remote->buffer_offset += read_rc; /* always null terminate buffer, the +1 to alloc always allows for this. 
*/ remote->buffer[remote->buffer_offset] = '\0'; crm_trace("Received %lld more bytes (%llu total)", (long long) read_rc, (unsigned long long) remote->buffer_offset); } else if ((rc == EINTR) || (rc == EAGAIN)) { crm_trace("No data available for non-blocking remote read: %s (%d)", pcmk_rc_str(rc), rc); } else if (read_rc == 0) { crm_debug("End of remote data encountered after %llu bytes", (unsigned long long) remote->buffer_offset); return ENOTCONN; } else { crm_debug("Error receiving remote data after %llu bytes: %s (%d)", (unsigned long long) remote->buffer_offset, pcmk_rc_str(rc), rc); return ENOTCONN; } header = localized_remote_header(remote); if(header) { if(remote->buffer_offset < header->size_total) { crm_trace("Read partial remote message (%llu of %u bytes)", (unsigned long long) remote->buffer_offset, header->size_total); } else { crm_trace("Read full remote message of %llu bytes", (unsigned long long) remote->buffer_offset); return pcmk_rc_ok; } } return EAGAIN; } /*! * \internal * \brief Read one message from a remote connection * * \param[in] remote Remote connection to read * \param[in] timeout_ms Fail if message not read in this many milliseconds * (10s will be used if 0, and 60s if negative) * * \return Standard Pacemaker return code */ int pcmk__read_remote_message(pcmk__remote_t *remote, int timeout_ms) { int rc = pcmk_rc_ok; time_t start = time(NULL); int remaining_timeout = 0; if (timeout_ms == 0) { timeout_ms = 10000; } else if (timeout_ms < 0) { timeout_ms = 60000; } remaining_timeout = timeout_ms; while (remaining_timeout > 0) { crm_trace("Waiting for remote data (%d ms of %d ms timeout remaining)", remaining_timeout, timeout_ms); rc = pcmk__remote_ready(remote, remaining_timeout); if (rc == ETIME) { crm_err("Timed out (%d ms) while waiting for remote data", remaining_timeout); return rc; } else if (rc != pcmk_rc_ok) { crm_debug("Wait for remote data aborted (will retry): %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); } else { rc = read_available_remote_data(remote); if (rc == pcmk_rc_ok) { return rc; } else if (rc == EAGAIN) { crm_trace("Waiting for more remote data"); } else { crm_debug("Could not receive remote data: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); } } // Don't waste time retrying after fatal errors if ((rc == ENOTCONN) || (rc == ESOCKTNOSUPPORT)) { return rc; } remaining_timeout = timeout_ms - ((time(NULL) - start) * 1000); } return ETIME; } struct tcp_async_cb_data { int sock; int timeout_ms; time_t start; void *userdata; void (*callback) (void *userdata, int rc, int sock); }; // \return TRUE if timer should be rescheduled, FALSE otherwise static gboolean check_connect_finished(gpointer userdata) { struct tcp_async_cb_data *cb_data = userdata; int rc; fd_set rset, wset; struct timeval ts = { 0, }; if (cb_data->start == 0) { // Last connect() returned success immediately rc = pcmk_rc_ok; goto dispatch_done; } // If the socket is ready for reading or writing, the connect succeeded FD_ZERO(&rset); FD_SET(cb_data->sock, &rset); wset = rset; rc = select(cb_data->sock + 1, &rset, &wset, NULL, &ts); if (rc < 0) { // select() error rc = errno; if ((rc == EINPROGRESS) || (rc == EAGAIN)) { if ((time(NULL) - cb_data->start) < (cb_data->timeout_ms / 1000)) { return TRUE; // There is time left, so reschedule timer } else { rc = ETIMEDOUT; } } crm_trace("Could not check socket %d for connection success: %s (%d)", cb_data->sock, pcmk_rc_str(rc), rc); } else if (rc == 0) { // select() timeout if ((time(NULL) - cb_data->start) < (cb_data->timeout_ms / 1000)) { return 
TRUE; // There is time left, so reschedule timer } crm_debug("Timed out while waiting for socket %d connection success", cb_data->sock); rc = ETIMEDOUT; // select() returned number of file descriptors that are ready } else if (FD_ISSET(cb_data->sock, &rset) || FD_ISSET(cb_data->sock, &wset)) { // The socket is ready; check it for connection errors int error = 0; socklen_t len = sizeof(error); if (getsockopt(cb_data->sock, SOL_SOCKET, SO_ERROR, &error, &len) < 0) { rc = errno; crm_trace("Couldn't check socket %d for connection errors: %s (%d)", cb_data->sock, pcmk_rc_str(rc), rc); } else if (error != 0) { rc = error; crm_trace("Socket %d connected with error: %s (%d)", cb_data->sock, pcmk_rc_str(rc), rc); } else { rc = pcmk_rc_ok; } } else { // Should not be possible crm_trace("select() succeeded, but socket %d not in resulting " "read/write sets", cb_data->sock); rc = EAGAIN; } dispatch_done: if (rc == pcmk_rc_ok) { crm_trace("Socket %d is connected", cb_data->sock); } else { close(cb_data->sock); cb_data->sock = -1; } if (cb_data->callback) { cb_data->callback(cb_data->userdata, rc, cb_data->sock); } free(cb_data); return FALSE; // Do not reschedule timer } /*! * \internal * \brief Attempt to connect socket, calling callback when done * * Set a given socket non-blocking, then attempt to connect to it, * retrying periodically until success or a timeout is reached. * Call a caller-supplied callback function when completed. * * \param[in] sock Newly created socket * \param[in] addr Socket address information for connect * \param[in] addrlen Size of socket address information in bytes * \param[in] timeout_ms Fail if not connected within this much time * \param[out] timer_id If not NULL, store retry timer ID here * \param[in] userdata User data to pass to callback * \param[in] callback Function to call when connection attempt completes * * \return Standard Pacemaker return code */ static int connect_socket_retry(int sock, const struct sockaddr *addr, socklen_t addrlen, int timeout_ms, int *timer_id, void *userdata, void (*callback) (void *userdata, int rc, int sock)) { int rc = 0; int interval = 500; int timer; struct tcp_async_cb_data *cb_data = NULL; rc = pcmk__set_nonblocking(sock); if (rc != pcmk_rc_ok) { crm_warn("Could not set socket non-blocking: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); return rc; } rc = connect(sock, addr, addrlen); if (rc < 0 && (errno != EINPROGRESS) && (errno != EAGAIN)) { rc = errno; crm_warn("Could not connect socket: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); return rc; } cb_data = calloc(1, sizeof(struct tcp_async_cb_data)); cb_data->userdata = userdata; cb_data->callback = callback; cb_data->sock = sock; cb_data->timeout_ms = timeout_ms; if (rc == 0) { /* The connect was successful immediately, we still return to mainloop * and let this callback get called later. This avoids the user of this api * to have to account for the fact the callback could be invoked within this * function before returning. */ cb_data->start = 0; interval = 1; } else { cb_data->start = time(NULL); } /* This timer function does a non-blocking poll on the socket to see if we * can use it. Once we can, the connect has completed. This method allows us * to connect without blocking the mainloop. * * @TODO Use a mainloop fd callback for this instead of polling. Something * about the way mainloop is currently polling prevents this from * working at the moment though. (See connect(2) regarding EINPROGRESS * for possible new handling needed.) 
*/ crm_trace("Scheduling check in %dms for whether connect to fd %d finished", interval, sock); timer = g_timeout_add(interval, check_connect_finished, cb_data); if (timer_id) { *timer_id = timer; } // timer callback should be taking care of cb_data // cppcheck-suppress memleak return pcmk_rc_ok; } /*! * \internal * \brief Attempt once to connect socket and set it non-blocking * * \param[in] sock Newly created socket * \param[in] addr Socket address information for connect * \param[in] addrlen Size of socket address information in bytes * * \return Standard Pacemaker return code */ static int connect_socket_once(int sock, const struct sockaddr *addr, socklen_t addrlen) { int rc = connect(sock, addr, addrlen); if (rc < 0) { rc = errno; crm_warn("Could not connect socket: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); return rc; } rc = pcmk__set_nonblocking(sock); if (rc != pcmk_rc_ok) { crm_warn("Could not set socket non-blocking: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); return rc; } return pcmk_ok; } /*! * \internal * \brief Connect to server at specified TCP port * * \param[in] host Name of server to connect to * \param[in] port Server port to connect to * \param[in] timeout_ms If asynchronous, fail if not connected in this time * \param[out] timer_id If asynchronous and this is non-NULL, retry timer ID * will be put here (for ease of cancelling by caller) * \param[out] sock_fd Where to store socket file descriptor * \param[in] userdata If asynchronous, data to pass to callback * \param[in] callback If NULL, attempt a single synchronous connection, * otherwise retry asynchronously then call this * * \return Standard Pacemaker return code */ int pcmk__connect_remote(const char *host, int port, int timeout, int *timer_id, int *sock_fd, void *userdata, void (*callback) (void *userdata, int rc, int sock)) { char buffer[INET6_ADDRSTRLEN]; struct addrinfo *res = NULL; struct addrinfo *rp = NULL; struct addrinfo hints; const char *server = host; int rc; int sock = -1; CRM_CHECK((host != NULL) && (sock_fd != NULL), return EINVAL); // Get host's IP address(es) memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_CANONNAME; rc = getaddrinfo(server, NULL, &hints, &res); if (rc != 0) { crm_err("Unable to get IP address info for %s: %s", server, gai_strerror(rc)); rc = ENOTCONN; goto async_cleanup; } if (!res || !res->ai_addr) { crm_err("Unable to get IP address info for %s: no result", server); rc = ENOTCONN; goto async_cleanup; } // getaddrinfo() returns a list of host's addresses, try them in order for (rp = res; rp != NULL; rp = rp->ai_next) { struct sockaddr *addr = rp->ai_addr; if (!addr) { continue; } if (rp->ai_canonname) { server = res->ai_canonname; } crm_debug("Got canonical name %s for %s", server, host); sock = socket(rp->ai_family, SOCK_STREAM, IPPROTO_TCP); if (sock == -1) { rc = errno; crm_warn("Could not create socket for remote connection to %s:%d: " "%s " CRM_XS " rc=%d", server, port, pcmk_rc_str(rc), rc); continue; } /* Set port appropriately for address family */ /* (void*) casts avoid false-positive compiler alignment warnings */ if (addr->sa_family == AF_INET6) { ((struct sockaddr_in6 *)(void*)addr)->sin6_port = htons(port); } else { ((struct sockaddr_in *)(void*)addr)->sin_port = htons(port); } memset(buffer, 0, DIMOF(buffer)); pcmk__sockaddr2str(addr, buffer); crm_info("Attempting remote connection to %s:%d", buffer, port); if (callback) { if (connect_socket_retry(sock, 
rp->ai_addr, rp->ai_addrlen, timeout, timer_id, userdata, callback) == pcmk_rc_ok) { goto async_cleanup; /* Success for now, we'll hear back later in the callback */ } } else if (connect_socket_once(sock, rp->ai_addr, rp->ai_addrlen) == pcmk_rc_ok) { break; /* Success */ } // Connect failed close(sock); sock = -1; rc = ENOTCONN; } async_cleanup: if (res) { freeaddrinfo(res); } *sock_fd = sock; return rc; } /*! * \internal * \brief Convert an IP address (IPv4 or IPv6) to a string for logging * * \param[in] sa Socket address for IP * \param[out] s Storage for at least INET6_ADDRSTRLEN bytes * * \note sa The socket address can be a pointer to struct sockaddr_in (IPv4), * struct sockaddr_in6 (IPv6) or struct sockaddr_storage (either), * as long as its sa_family member is set correctly. */ void pcmk__sockaddr2str(void *sa, char *s) { switch (((struct sockaddr*)sa)->sa_family) { case AF_INET: inet_ntop(AF_INET, &(((struct sockaddr_in *)sa)->sin_addr), s, INET6_ADDRSTRLEN); break; case AF_INET6: inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)sa)->sin6_addr), s, INET6_ADDRSTRLEN); break; default: strcpy(s, ""); } } /*! * \internal * \brief Accept a client connection on a remote server socket * * \param[in] ssock Server socket file descriptor being listened on * \param[out] csock Where to put new client socket's file descriptor * * \return Standard Pacemaker return code */ int pcmk__accept_remote_connection(int ssock, int *csock) { int rc; struct sockaddr_storage addr; socklen_t laddr = sizeof(addr); char addr_str[INET6_ADDRSTRLEN]; /* accept the connection */ memset(&addr, 0, sizeof(addr)); *csock = accept(ssock, (struct sockaddr *)&addr, &laddr); if (*csock == -1) { rc = errno; crm_err("Could not accept remote client connection: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); return rc; } pcmk__sockaddr2str(&addr, addr_str); crm_info("Accepted new remote client connection from %s", addr_str); rc = pcmk__set_nonblocking(*csock); if (rc != pcmk_rc_ok) { crm_err("Could not set socket non-blocking: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc); close(*csock); *csock = -1; return rc; } #ifdef TCP_USER_TIMEOUT if (pcmk__get_sbd_timeout() > 0) { // Time to fail and retry before watchdog unsigned int optval = (unsigned int) pcmk__get_sbd_timeout() / 2; rc = setsockopt(*csock, SOL_TCP, TCP_USER_TIMEOUT, &optval, sizeof(optval)); if (rc < 0) { rc = errno; crm_err("Could not set TCP timeout to %d ms on remote connection: " "%s " CRM_XS " rc=%d", optval, pcmk_rc_str(rc), rc); close(*csock); *csock = -1; return rc; } } #endif return rc; } /*! * \brief Get the default remote connection TCP port on this host * * \return Remote connection TCP port number */ int crm_default_remote_port() { static int port = 0; if (port == 0) { const char *env = getenv("PCMK_remote_port"); if (env) { errno = 0; port = strtol(env, NULL, 10); if (errno || (port < 1) || (port > 65535)) { crm_warn("Environment variable PCMK_remote_port has invalid value '%s', using %d instead", env, DEFAULT_REMOTE_PORT); port = DEFAULT_REMOTE_PORT; } } else { port = DEFAULT_REMOTE_PORT; } } return port; } diff --git a/lib/common/strings.c b/lib/common/strings.c index 4be151d6a4..21b4130771 100644 --- a/lib/common/strings.c +++ b/lib/common/strings.c @@ -1,1115 +1,1111 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include // DBL_MIN #include #include // fabs() #include #include char * crm_itoa_stack(int an_int, char *buffer, size_t len) { if (buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } /*! * \internal * \brief Scan a long long integer from a string * * \param[in] text String to scan * \param[out] result If not NULL, where to store scanned value * \param[out] end_text If not NULL, where to store pointer to just after value * * \return Standard Pacemaker return code (\c pcmk_rc_ok on success, * \c EINVAL on failed string conversion due to invalid input, * or \c EOVERFLOW on arithmetic overflow) * \note Sets \c errno on error */ static int scan_ll(const char *text, long long *result, char **end_text) { long long local_result = PCMK__PARSE_INT_DEFAULT; char *local_end_text = NULL; int rc = pcmk_rc_ok; errno = 0; if (text != NULL) { -#ifdef ANSI_ONLY - local_result = (long long) strtol(text, &local_end_text, 10); -#else local_result = strtoll(text, &local_end_text, 10); -#endif if (errno == ERANGE) { rc = EOVERFLOW; crm_warn("Integer parsed from %s was clipped to %lld", text, local_result); } else if (errno != 0) { rc = errno; local_result = PCMK__PARSE_INT_DEFAULT; crm_warn("Could not parse integer from %s (using %d instead): %s", text, PCMK__PARSE_INT_DEFAULT, pcmk_rc_str(rc)); } else if (local_end_text == text) { rc = EINVAL; local_result = PCMK__PARSE_INT_DEFAULT; crm_warn("Could not parse integer from %s (using %d instead): " "No digits found", text, PCMK__PARSE_INT_DEFAULT); } if ((end_text == NULL) && !pcmk__str_empty(local_end_text)) { crm_warn("Characters left over after parsing '%s': '%s'", text, local_end_text); } errno = rc; } if (end_text != NULL) { *end_text = local_end_text; } if (result != NULL) { *result = local_result; } return rc; } /*! * \brief Parse a long long integer value from a string * * \param[in] text The string to parse * \param[in] default_text Default string to parse if text is NULL * * \return Parsed value on success, PCMK__PARSE_INT_DEFAULT (and set * errno) on error */ long long crm_parse_ll(const char *text, const char *default_text) { long long result; if (text == NULL) { text = default_text; if (text == NULL) { crm_err("No default conversion value supplied"); errno = EINVAL; return PCMK__PARSE_INT_DEFAULT; } } scan_ll(text, &result, NULL); return result; } /*! * \brief Parse an integer value from a string * * \param[in] text The string to parse * \param[in] default_text Default string to parse if text is NULL * * \return Parsed value on success, INT_MIN or INT_MAX (and set errno to * ERANGE) if parsed value is out of integer range, otherwise * PCMK__PARSE_INT_DEFAULT (and set errno) */ int crm_parse_int(const char *text, const char *default_text) { long long result = crm_parse_ll(text, default_text); if (result < INT_MIN) { // If errno is ERANGE, crm_parse_ll() has already logged a message if (errno != ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); errno = ERANGE; } return INT_MIN; } else if (result > INT_MAX) { // If errno is ERANGE, crm_parse_ll() has already logged a message if (errno != ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); errno = ERANGE; } return INT_MAX; } return (int) result; } /*! 
* \internal * \brief Scan a double-precision floating-point value from a string * * \param[in] text The string to parse * \param[out] result Parsed value on success, or * \c PCMK__PARSE_DBL_DEFAULT on error * \param[in] default_text Default string to parse if \p text is * \c NULL * \param[out] end_text If not \c NULL, where to store a pointer * to the position immediately after the * value * * \return Standard Pacemaker return code (\c pcmk_rc_ok on success, * \c EINVAL on failed string conversion due to invalid input, * \c EOVERFLOW on arithmetic overflow, \c pcmk_rc_underflow * on arithmetic underflow, or \c errno from \c strtod() on * other parse errors) */ int pcmk__scan_double(const char *text, double *result, const char *default_text, char **end_text) { int rc = pcmk_rc_ok; char *local_end_text = NULL; CRM_ASSERT(result != NULL); *result = PCMK__PARSE_DBL_DEFAULT; text = (text != NULL) ? text : default_text; if (text == NULL) { rc = EINVAL; crm_debug("No text and no default conversion value supplied"); } else { errno = 0; *result = strtod(text, &local_end_text); if (errno == ERANGE) { /* * Overflow: strtod() returns +/- HUGE_VAL and sets errno to * ERANGE * * Underflow: strtod() returns "a value whose magnitude is * no greater than the smallest normalized * positive" double. Whether ERANGE is set is * implementation-defined. */ const char *over_under; if (fabs(*result) > DBL_MIN) { rc = EOVERFLOW; over_under = "over"; } else { rc = pcmk_rc_underflow; over_under = "under"; } crm_debug("Floating-point value parsed from '%s' would %sflow " "(using %g instead)", text, over_under, *result); } else if (errno != 0) { rc = errno; // strtod() set *result = 0 on parse failure *result = PCMK__PARSE_DBL_DEFAULT; crm_debug("Could not parse floating-point value from '%s' (using " "%.1f instead): %s", text, PCMK__PARSE_DBL_DEFAULT, pcmk_rc_str(rc)); } else if (local_end_text == text) { // errno == 0, but nothing was parsed rc = EINVAL; *result = PCMK__PARSE_DBL_DEFAULT; crm_debug("Could not parse floating-point value from '%s' (using " "%.1f instead): No digits found", text, PCMK__PARSE_DBL_DEFAULT); } else if (fabs(*result) <= DBL_MIN) { /* * errno == 0 and text was parsed, but value might have * underflowed. * * ERANGE might not be set for underflow. Check magnitude * of *result, but also make sure the input number is not * actually zero (0 <= DBL_MIN is not underflow). * * This check must come last. A parse failure in strtod() * also sets *result == 0, so a parse failure would match * this test condition prematurely. */ for (const char *p = text; p != local_end_text; p++) { if (strchr("0.eE", *p) == NULL) { rc = pcmk_rc_underflow; crm_debug("Floating-point value parsed from '%s' would " "underflow (using %g instead)", text, *result); break; } } } else { crm_trace("Floating-point value parsed successfully from " "'%s': %g", text, *result); } if ((end_text == NULL) && !pcmk__str_empty(local_end_text)) { crm_debug("Characters left over after parsing '%s': '%s'", text, local_end_text); } } if (end_text != NULL) { *end_text = local_end_text; } return rc; } /*! 
* \internal * \brief Parse a guint from a string stored in a hash table * * \param[in] table Hash table to search * \param[in] key Hash table key to use to retrieve string * \param[in] default_val What to use if key has no entry in table * \param[out] result If not NULL, where to store parsed integer * * \return Standard Pacemaker return code */ int pcmk__guint_from_hash(GHashTable *table, const char *key, guint default_val, guint *result) { const char *value; long long value_ll; CRM_CHECK((table != NULL) && (key != NULL), return EINVAL); value = g_hash_table_lookup(table, key); if (value == NULL) { if (result != NULL) { *result = default_val; } return pcmk_rc_ok; } errno = 0; value_ll = crm_parse_ll(value, NULL); if (errno != 0) { return errno; // Message already logged } if ((value_ll < 0) || (value_ll > G_MAXUINT)) { crm_warn("Could not parse non-negative integer from %s", value); return ERANGE; } if (result != NULL) { *result = (guint) value_ll; } return pcmk_rc_ok; } #ifndef NUMCHARS # define NUMCHARS "0123456789." #endif #ifndef WHITESPACE # define WHITESPACE " \t\n\r\f" #endif /*! * \brief Parse a time+units string and return milliseconds equivalent * * \param[in] input String with a number and units (optionally with whitespace * before and/or after the number) * * \return Milliseconds corresponding to string expression, or * PCMK__PARSE_INT_DEFAULT on error */ long long crm_get_msec(const char *input) { const char *num_start = NULL; const char *units; long long multiplier = 1000; long long divisor = 1; long long msec = PCMK__PARSE_INT_DEFAULT; size_t num_len = 0; char *end_text = NULL; if (input == NULL) { return PCMK__PARSE_INT_DEFAULT; } num_start = input + strspn(input, WHITESPACE); num_len = strspn(num_start, NUMCHARS); if (num_len < 1) { return PCMK__PARSE_INT_DEFAULT; } units = num_start + num_len; units += strspn(units, WHITESPACE); if (!strncasecmp(units, "ms", 2) || !strncasecmp(units, "msec", 4)) { multiplier = 1; divisor = 1; } else if (!strncasecmp(units, "us", 2) || !strncasecmp(units, "usec", 4)) { multiplier = 1; divisor = 1000; } else if (!strncasecmp(units, "s", 1) || !strncasecmp(units, "sec", 3)) { multiplier = 1000; divisor = 1; } else if (!strncasecmp(units, "m", 1) || !strncasecmp(units, "min", 3)) { multiplier = 60 * 1000; divisor = 1; } else if (!strncasecmp(units, "h", 1) || !strncasecmp(units, "hr", 2)) { multiplier = 60 * 60 * 1000; divisor = 1; } else if ((*units != EOS) && (*units != '\n') && (*units != '\r')) { return PCMK__PARSE_INT_DEFAULT; } scan_ll(num_start, &msec, &end_text); if (msec > (LLONG_MAX / multiplier)) { // Arithmetics overflow while multiplier/divisor mutually exclusive return LLONG_MAX; } msec *= multiplier; msec /= divisor; return msec; } gboolean crm_is_true(const char *s) { gboolean ret = FALSE; if (s != NULL) { crm_str_to_boolean(s, &ret); } return ret; } int crm_str_to_boolean(const char *s, int *ret) { if (s == NULL) { return -1; } else if (strcasecmp(s, "true") == 0 || strcasecmp(s, "on") == 0 || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) { *ret = TRUE; return 1; } else if (strcasecmp(s, "false") == 0 || strcasecmp(s, "off") == 0 || strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0) { *ret = FALSE; return 1; } return -1; } char * crm_strip_trailing_newline(char *str) { int len; if (str == NULL) { return str; } for (len = strlen(str) - 1; len >= 0 && str[len] == '\n'; len--) { str[len] = '\0'; } return str; } /*! 
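 *
 * [Hypothetical example (not part of the original source): how the time and
 * boolean parsers above behave for a few inputs.]
 */
#if 0
static void msec_and_bool_sketch(void)
{
    long long a = crm_get_msec("5s");      /* 5000 */
    long long b = crm_get_msec("750ms");   /* 750 */
    long long c = crm_get_msec("2min");    /* 120000 */
    long long bad = crm_get_msec("later"); /* PCMK__PARSE_INT_DEFAULT: no digits */

    gboolean t = crm_is_true("yes");       /* TRUE */
    gboolean f = crm_is_true(NULL);        /* FALSE */

    crm_trace("%lld %lld %lld %lld %d %d", a, b, c, bad, t, f);
}
#endif

/*!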
* \brief Check whether a string starts with a certain sequence * * \param[in] str String to check * \param[in] prefix Sequence to match against beginning of \p str * * \return \c true if \p str begins with match, \c false otherwise * \note This is equivalent to !strncmp(s, prefix, strlen(prefix)) * but is likely less efficient when prefix is a string literal * if the compiler optimizes away the strlen() at compile time, * and more efficient otherwise. */ bool pcmk__starts_with(const char *str, const char *prefix) { const char *s = str; const char *p = prefix; if (!s || !p) { return false; } while (*s && *p) { if (*s++ != *p++) { return false; } } return (*p == 0); } static inline bool ends_with(const char *s, const char *match, bool as_extension) { if (pcmk__str_empty(match)) { return true; } else if (s == NULL) { return false; } else { size_t slen, mlen; /* Besides as_extension, we could also check !strchr(&match[1], match[0]) but that would be inefficient. */ if (as_extension) { s = strrchr(s, match[0]); return (s == NULL)? false : !strcmp(s, match); } mlen = strlen(match); slen = strlen(s); return ((slen >= mlen) && !strcmp(s + slen - mlen, match)); } } /*! * \internal * \brief Check whether a string ends with a certain sequence * * \param[in] s String to check * \param[in] match Sequence to match against end of \p s * * \return \c true if \p s ends case-sensitively with match, \c false otherwise * \note pcmk__ends_with_ext() can be used if the first character of match * does not recur in match. */ bool pcmk__ends_with(const char *s, const char *match) { return ends_with(s, match, false); } /*! * \internal * \brief Check whether a string ends with a certain "extension" * * \param[in] s String to check * \param[in] match Extension to match against end of \p s, that is, * its first character must not occur anywhere * in the rest of that very sequence (example: file * extension where the last dot is its delimiter, * e.g., ".html"); incorrect results may be * returned otherwise. * * \return \c true if \p s ends (verbatim, i.e., case sensitively) * with "extension" designated as \p match (including empty * string), \c false otherwise * * \note Main incentive to prefer this function over \c pcmk__ends_with() * where possible is the efficiency (at the cost of added * restriction on \p match as stated; the complexity class * remains the same, though: BigO(M+N) vs. BigO(M+2N)). */ bool pcmk__ends_with_ext(const char *s, const char *match) { return ends_with(s, match, true); } /* * This re-implements g_str_hash as it was prior to glib2-2.28: * * https://gitlab.gnome.org/GNOME/glib/commit/354d655ba8a54b754cb5a3efb42767327775696c * * Note that the new g_str_hash is presumably a *better* hash (it's actually * a correct implementation of DJB's hash), but we need to preserve existing * behaviour, because the hash key ultimately determines the "sort" order * when iterating through GHashTables, which affects allocation of scores to * clone instances when iterating through rsc->allowed_nodes. It (somehow) * also appears to have some minor impact on the ordering of a few * pseudo_event IDs in the transition graph. 
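 *
 * [Illustrative aside, not part of the original diff: hypothetical calls to
 * the prefix/suffix helpers defined above.]
 */
#if 0
static void affix_sketch(void)
{
    bool a = pcmk__starts_with("node-10", "node");          /* true */
    bool b = pcmk__ends_with("cib.xml", ".xml");            /* true */
    /* ".bz2" qualifies as an "extension": its first character does not recur */
    bool c = pcmk__ends_with_ext("cib-12.raw.bz2", ".bz2"); /* true */

    crm_trace("%d %d %d", a, b, c);
}
#endif
/*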
*/ guint g_str_hash_traditional(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + *p; return h; } /* used with hash tables where case does not matter */ gboolean crm_strcase_equal(gconstpointer a, gconstpointer b) { return pcmk__str_eq((const char *)a, (const char *)b, pcmk__str_casei); } guint crm_strcase_hash(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + g_ascii_tolower(*p); return h; } static void copy_str_table_entry(gpointer key, gpointer value, gpointer user_data) { if (key && value && user_data) { g_hash_table_insert((GHashTable*)user_data, strdup(key), strdup(value)); } } GHashTable * crm_str_table_dup(GHashTable *old_table) { GHashTable *new_table = NULL; if (old_table) { new_table = crm_str_table_new(); g_hash_table_foreach(old_table, copy_str_table_entry, new_table); } return new_table; } /*! * \internal * \brief Add a word to a string list of words * * \param[in,out] list Pointer to current string list (may not be NULL) * \param[in,out] len If not NULL, must be set to length of \p list, * and will be updated to new length of \p list * \param[in] word String to add to \p list (\p list will be * unchanged if this is NULL or the empty string) * \param[in] separator String to separate words in \p list * (a space will be used if this is NULL) * * \note This dynamically reallocates \p list as needed. \p word may contain * \p separator, though that would be a bad idea if the string needs to be * parsed later. */ void pcmk__add_separated_word(char **list, size_t *len, const char *word, const char *separator) { size_t orig_len, new_len; CRM_ASSERT(list != NULL); if (pcmk__str_empty(word)) { return; } // Use provided length, or calculate it if not available orig_len = (len != NULL)? *len : ((*list == NULL)? 0 : strlen(*list)); // Don't add a separator before the first word in the list if (orig_len == 0) { separator = ""; // Default to space-separated } else if (separator == NULL) { separator = " "; } new_len = orig_len + strlen(separator) + strlen(word); if (len != NULL) { *len = new_len; } // +1 for null terminator *list = pcmk__realloc(*list, new_len + 1); sprintf(*list + orig_len, "%s%s", separator, word); } /*! 
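 *
 * [Illustrative sketch, not in the original change: building a
 * space-separated word list with pcmk__add_separated_word() above; the words
 * are hypothetical.]
 */
#if 0
static void word_list_sketch(void)
{
    char *list = NULL;
    size_t len = 0;

    pcmk__add_separated_word(&list, &len, "stop", NULL);
    pcmk__add_separated_word(&list, &len, "start", NULL);
    pcmk__add_separated_word(&list, &len, "monitor", NULL);
    /* list is now "stop start monitor" */
    free(list);
}
#endif

/*!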
* \internal * \brief Compress data * * \param[in] data Data to compress * \param[in] length Number of characters of data to compress * \param[in] max Maximum size of compressed data (or 0 to estimate) * \param[out] result Where to store newly allocated compressed result * \param[out] result_len Where to store actual compressed length of result * * \return Standard Pacemaker return code */ int pcmk__compress(const char *data, unsigned int length, unsigned int max, char **result, unsigned int *result_len) { int rc; char *compressed = NULL; char *uncompressed = strdup(data); #ifdef CLOCK_MONOTONIC struct timespec after_t; struct timespec before_t; #endif if (max == 0) { max = (length * 1.01) + 601; // Size guaranteed to hold result } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &before_t); #endif compressed = calloc((size_t) max, sizeof(char)); CRM_ASSERT(compressed); *result_len = max; rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length, CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK); free(uncompressed); if (rc != BZ_OK) { crm_err("Compression of %d bytes failed: %s " CRM_XS " bzerror=%d", length, bz2_strerror(rc), rc); free(compressed); return pcmk_rc_error; } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &after_t); crm_trace("Compressed %d bytes into %d (ratio %d:1) in %.0fms", length, *result_len, length / (*result_len), (after_t.tv_sec - before_t.tv_sec) * 1000 + (after_t.tv_nsec - before_t.tv_nsec) / 1e6); #else crm_trace("Compressed %d bytes into %d (ratio %d:1)", length, *result_len, length / (*result_len)); #endif *result = compressed; return pcmk_rc_ok; } char * crm_strdup_printf(char const *format, ...) { va_list ap; int len = 0; char *string = NULL; va_start(ap, format); len = vasprintf (&string, format, ap); CRM_ASSERT(len > 0); va_end(ap); return string; } int pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end) { char *remainder = NULL; CRM_ASSERT(start != NULL && end != NULL); *start = PCMK__PARSE_INT_DEFAULT; *end = PCMK__PARSE_INT_DEFAULT; crm_trace("Attempting to decode: [%s]", srcstring); if (pcmk__str_empty(srcstring) || !strcmp(srcstring, "-")) { return pcmk_rc_unknown_format; } /* String starts with a dash, so this is either a range with * no beginning or garbage. * */ if (*srcstring == '-') { int rc = scan_ll(srcstring+1, end, &remainder); if (rc != pcmk_rc_ok || *remainder != '\0') { return pcmk_rc_unknown_format; } else { return pcmk_rc_ok; } } if (scan_ll(srcstring, start, &remainder) != pcmk_rc_ok) { return pcmk_rc_unknown_format; } if (*remainder && *remainder == '-') { if (*(remainder+1)) { char *more_remainder = NULL; int rc = scan_ll(remainder+1, end, &more_remainder); if (rc != pcmk_rc_ok || *more_remainder != '\0') { return pcmk_rc_unknown_format; } } } else if (*remainder && *remainder != '-') { *start = PCMK__PARSE_INT_DEFAULT; return pcmk_rc_unknown_format; } else { /* The input string contained only one number. Set start and end * to the same value and return pcmk_rc_ok. This gives the caller * a way to tell this condition apart from a range with no end. */ *end = *start; } return pcmk_rc_ok; } /*! * \internal * \brief Find a string in a list of strings * * Search \p lst for \p s, taking case into account. As a special case, * if "*" is the only element of \p lst, the search is successful. 
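 *
 * [Illustrative sketch, not part of the original source: example inputs for
 * pcmk__parse_ll_range() defined above; the literals are made up.]
 */
#if 0
static void range_sketch(void)
{
    long long start = 0, end = 0;

    pcmk__parse_ll_range("3-5", &start, &end); /* start == 3, end == 5 */
    pcmk__parse_ll_range("-5", &start, &end);  /* no beginning: start stays
                                                * PCMK__PARSE_INT_DEFAULT, end == 5 */
    pcmk__parse_ll_range("4", &start, &end);   /* single value: start == end == 4 */
}
#endif

/*!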
* * \param[in] lst List to search * \param[in] s String to search for * * \return \c TRUE if \p s is in \p lst, or \c FALSE otherwise */ gboolean pcmk__str_in_list(GList *lst, const gchar *s) { if (lst == NULL) { return FALSE; } if (strcmp(lst->data, "*") == 0 && lst->next == NULL) { return TRUE; } return g_list_find_custom(lst, s, (GCompareFunc) strcmp) != NULL; } static bool str_any_of(bool casei, const char *s, va_list args) { bool rc = false; if (s != NULL) { while (1) { const char *ele = va_arg(args, const char *); if (ele == NULL) { break; } else if (pcmk__str_eq(s, ele, casei? pcmk__str_casei : pcmk__str_none)) { rc = true; break; } } } return rc; } /*! * \internal * \brief Is a string a member of a list of strings? * * \param[in] s String to search for in \p ... * \param[in] ... Strings to compare \p s against. The final string * must be NULL. * * \note The comparison is done case-insensitively. The function name is * meant to be reminiscent of strcasecmp. * * \return \c true if \p s is in \p ..., or \c false otherwise */ bool pcmk__strcase_any_of(const char *s, ...) { va_list ap; bool rc; va_start(ap, s); rc = str_any_of(true, s, ap); va_end(ap); return rc; } /*! * \internal * \brief Is a string a member of a list of strings? * * \param[in] s String to search for in \p ... * \param[in] ... Strings to compare \p s against. The final string * must be NULL. * * \note The comparison is done taking case into account. * * \return \c true if \p s is in \p ..., or \c false otherwise */ bool pcmk__str_any_of(const char *s, ...) { va_list ap; bool rc; va_start(ap, s); rc = str_any_of(false, s, ap); va_end(ap); return rc; } /*! * \internal * \brief Check whether a character is in any of a list of strings * * \param[in] ch Character (ASCII) to search for * \param[in] ... Strings to search. Final argument must be * \c NULL. * * \return \c true if any of \p ... contain \p ch, \c false otherwise * \note \p ... must contain at least one argument (\c NULL). */ bool pcmk__char_in_any_str(int ch, ...) { bool rc = false; va_list ap; /* * Passing a char to va_start() can generate compiler warnings, * so ch is declared as an int. */ va_start(ap, ch); while (1) { const char *ele = va_arg(ap, const char *); if (ele == NULL) { break; } else if (strchr(ele, ch) != NULL) { rc = true; break; } } va_end(ap); return rc; } /*! * \brief Sort strings, with numeric portions sorted numerically * * Sort two strings case-insensitively like strcasecmp(), but with any numeric * portions of the string sorted numerically. This is particularly useful for * node names (for example, "node10" will sort higher than "node9" but lower * than "remotenode9"). * * \param[in] s1 First string to compare (must not be NULL) * \param[in] s2 Second string to compare (must not be NULL) * * \retval -1 \p s1 comes before \p s2 * \retval 0 \p s1 and \p s2 are equal * \retval 1 \p s1 comes after \p s2 */ int pcmk_numeric_strcasecmp(const char *s1, const char *s2) { while (*s1 && *s2) { if (isdigit(*s1) && isdigit(*s2)) { // If node names contain a number, sort numerically char *end1 = NULL; char *end2 = NULL; long num1 = strtol(s1, &end1, 10); long num2 = strtol(s2, &end2, 10); // allow ordering e.g. 
007 > 7 size_t len1 = end1 - s1; size_t len2 = end2 - s2; if (num1 < num2) { return -1; } else if (num1 > num2) { return 1; } else if (len1 < len2) { return -1; } else if (len1 > len2) { return 1; } s1 = end1; s2 = end2; } else { // Compare non-digits case-insensitively int lower1 = tolower(*s1); int lower2 = tolower(*s2); if (lower1 < lower2) { return -1; } else if (lower1 > lower2) { return 1; } ++s1; ++s2; } } if (!*s1 && *s2) { return -1; } else if (*s1 && !*s2) { return 1; } return 0; } /* * \brief Sort strings. * * This is your one-stop function for string comparison. By default, this * function works like g_strcmp0. That is, like strcmp but a NULL string * sorts before a non-NULL string. * * Behavior can be changed with various flags: * * - pcmk__str_regex - The second string is a regular expression that the * first string will be matched against. * - pcmk__str_casei - By default, comparisons are done taking case into * account. This flag makes comparisons case-insensitive. * This can be combined with pcmk__str_regex. * - pcmk__str_null_matches - If one string is NULL and the other is not, * still return 0. * * \param[in] s1 First string to compare * \param[in] s2 Second string to compare, or a regular expression to * match if pcmk__str_regex is set * \param[in] flags A bitfield of pcmk__str_flags to modify operation * * \retval -1 \p s1 is NULL or comes before \p s2 * \retval 0 \p s1 and \p s2 are equal, or \p s1 is found in \p s2 if * pcmk__str_regex is set * \retval 1 \p s2 is NULL or \p s1 comes after \p s2, or if \p s2 * is an invalid regular expression, or \p s1 was not found * in \p s2 if pcmk__str_regex is set. */ int pcmk__strcmp(const char *s1, const char *s2, uint32_t flags) { /* If this flag is set, the second string is a regex. */ if (pcmk_is_set(flags, pcmk__str_regex)) { regex_t *r_patt = calloc(1, sizeof(regex_t)); int reg_flags = REG_EXTENDED | REG_NOSUB; int regcomp_rc = 0; int rc = 0; if (s1 == NULL || s2 == NULL) { free(r_patt); return 1; } if (pcmk_is_set(flags, pcmk__str_casei)) { reg_flags |= REG_ICASE; } regcomp_rc = regcomp(r_patt, s2, reg_flags); if (regcomp_rc != 0) { rc = 1; crm_err("Bad regex '%s' for update: %s", s2, strerror(regcomp_rc)); } else { rc = regexec(r_patt, s1, 0, NULL, 0); if (rc != 0) { rc = 1; } } regfree(r_patt); free(r_patt); return rc; } /* If the strings are the same pointer, return 0 immediately. */ if (s1 == s2) { return 0; } /* If this flag is set, return 0 if either (or both) of the input strings * are NULL. If neither one is NULL, we need to continue and compare * them normally. */ if (pcmk_is_set(flags, pcmk__str_null_matches)) { if (s1 == NULL || s2 == NULL) { return 0; } } /* Handle the cases where one is NULL and the str_null_matches flag is not set. * A NULL string always sorts to the beginning. */ if (s1 == NULL) { return -1; } else if (s2 == NULL) { return 1; } if (pcmk_is_set(flags, pcmk__str_casei)) { return strcasecmp(s1, s2); } else { return strcmp(s1, s2); } } // Deprecated functions kept only for backward API compatibility gboolean safe_str_neq(const char *a, const char *b); gboolean crm_str_eq(const char *a, const char *b, gboolean use_case); //! \deprecated Use pcmk__str_eq() instead gboolean safe_str_neq(const char *a, const char *b) { if (a == b) { return FALSE; } else if (a == NULL || b == NULL) { return TRUE; } else if (strcasecmp(a, b) == 0) { return FALSE; } return TRUE; } //! 
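
#if 0
/* Illustrative sketch (not in the original diff): how the comparison helpers
 * above are intended to be used; the strings are hypothetical. */
static void compare_sketch(void)
{
    /* Numeric-aware ordering: "node9" < "node10" < "remotenode9" */
    int c1 = pcmk_numeric_strcasecmp("node9", "node10");       /* -1 */
    int c2 = pcmk_numeric_strcasecmp("node10", "remotenode9"); /* -1 */

    /* Flag-driven comparison with pcmk__strcmp() */
    int eq = pcmk__strcmp("Start", "start", pcmk__str_casei);   /* 0 */
    int nul = pcmk__strcmp(NULL, "x", pcmk__str_null_matches);  /* 0 */
    int re = pcmk__strcmp("pacemaker-controld", "pacemaker-.*",
                          pcmk__str_regex);                     /* 0 (match) */

    crm_trace("%d %d %d %d %d", c1, c2, eq, nul, re);
}
#endif

//!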
\deprecated Use pcmk__str_eq() instead gboolean crm_str_eq(const char *a, const char *b, gboolean use_case) { if (use_case) { return g_strcmp0(a, b) == 0; /* TODO - Figure out which calls, if any, really need to be case independent */ } else if (a == b) { return TRUE; } else if (a == NULL || b == NULL) { /* shouldn't be comparing NULLs */ return FALSE; } else if (strcasecmp(a, b) == 0) { return TRUE; } return FALSE; } diff --git a/lib/common/xml.c b/lib/common/xml.c index 8b7191140f..dc9d5564ed 100644 --- a/lib/common/xml.c +++ b/lib/common/xml.c @@ -1,3001 +1,2989 @@ /* * Copyright 2004-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include /* xmlAllocOutputBuffer */ #include #include #include #include #include // PCMK__XML_LOG_BASE, etc. #include "crmcommon_private.h" // Define this as 1 in development to get insanely verbose trace messages #ifndef XML_PARSER_DEBUG #define XML_PARSER_DEBUG 0 #endif /* @TODO XML_PARSE_RECOVER allows some XML errors to be silently worked around * by libxml2, which is potentially ambiguous and dangerous. We should drop it * when we can break backward compatibility with configurations that might be * relying on it (i.e. pacemaker 3.0.0). * * It might be a good idea to have a transitional period where we first try * parsing without XML_PARSE_RECOVER, and if that fails, try parsing again with * it, logging a warning if it succeeds. */ #define PCMK__XML_PARSE_OPTS (XML_PARSE_NOBLANKS | XML_PARSE_RECOVER) #define CHUNK_SIZE 1024 bool pcmk__tracking_xml_changes(xmlNode *xml, bool lazy) { if(xml == NULL || xml->doc == NULL || xml->doc->_private == NULL) { return FALSE; } else if (!pcmk_is_set(((xml_private_t *)xml->doc->_private)->flags, xpf_tracking)) { return FALSE; } else if (lazy && !pcmk_is_set(((xml_private_t *)xml->doc->_private)->flags, xpf_lazy)) { return FALSE; } return TRUE; } #define buffer_print(buffer, max, offset, fmt, args...) 
do { \ int rc = (max); \ if(buffer) { \ rc = snprintf((buffer) + (offset), (max) - (offset), fmt, ##args); \ } \ if(buffer && rc < 0) { \ crm_perror(LOG_ERR, "snprintf failed at offset %d", offset); \ (buffer)[(offset)] = 0; \ break; \ } else if(rc >= ((max) - (offset))) { \ char *tmp = NULL; \ (max) = QB_MAX(CHUNK_SIZE, (max) * 2); \ tmp = pcmk__realloc((buffer), (max)); \ CRM_ASSERT(tmp); \ (buffer) = tmp; \ } else { \ offset += rc; \ break; \ } \ } while(1); static void insert_prefix(int options, char **buffer, int *offset, int *max, int depth) { if (options & xml_log_option_formatted) { size_t spaces = 2 * depth; if ((*buffer) == NULL || spaces >= ((*max) - (*offset))) { (*max) = QB_MAX(CHUNK_SIZE, (*max) * 2); (*buffer) = pcmk__realloc((*buffer), (*max)); } memset((*buffer) + (*offset), ' ', spaces); (*offset) += spaces; } } static void set_parent_flag(xmlNode *xml, long flag) { for(; xml; xml = xml->parent) { xml_private_t *p = xml->_private; if(p == NULL) { /* During calls to xmlDocCopyNode(), _private will be unset for parent nodes */ } else { pcmk__set_xml_flags(p, flag); } } } void pcmk__set_xml_doc_flag(xmlNode *xml, enum xml_private_flags flag) { if(xml && xml->doc && xml->doc->_private){ /* During calls to xmlDocCopyNode(), xml->doc may be unset */ xml_private_t *p = xml->doc->_private; pcmk__set_xml_flags(p, flag); } } // Mark document, element, and all element's parents as changed static void mark_xml_node_dirty(xmlNode *xml) { pcmk__set_xml_doc_flag(xml, xpf_dirty); set_parent_flag(xml, xpf_dirty); } // Clear flags on XML node and its children static void reset_xml_node_flags(xmlNode *xml) { xmlNode *cIter = NULL; xml_private_t *p = xml->_private; if (p) { p->flags = 0; } for (cIter = pcmk__xml_first_child(xml); cIter != NULL; cIter = pcmk__xml_next(cIter)) { reset_xml_node_flags(cIter); } } // Set xpf_created flag on XML node and any children void pcmk__mark_xml_created(xmlNode *xml) { xmlNode *cIter = NULL; xml_private_t *p = xml->_private; if (p && pcmk__tracking_xml_changes(xml, FALSE)) { if (!pcmk_is_set(p->flags, xpf_created)) { pcmk__set_xml_flags(p, xpf_created); mark_xml_node_dirty(xml); } for (cIter = pcmk__xml_first_child(xml); cIter != NULL; cIter = pcmk__xml_next(cIter)) { pcmk__mark_xml_created(cIter); } } } void pcmk__mark_xml_attr_dirty(xmlAttr *a) { xmlNode *parent = a->parent; xml_private_t *p = NULL; p = a->_private; pcmk__set_xml_flags(p, xpf_dirty|xpf_modified); pcmk__clear_xml_flags(p, xpf_deleted); mark_xml_node_dirty(parent); } #define XML_PRIVATE_MAGIC (long) 0x81726354 // Free an XML object previously marked as deleted static void free_deleted_object(void *data) { if(data) { pcmk__deleted_xml_t *deleted_obj = data; free(deleted_obj->path); free(deleted_obj); } } // Free and NULL user, ACLs, and deleted objects in an XML node's private data static void reset_xml_private_data(xml_private_t *p) { if(p) { CRM_ASSERT(p->check == XML_PRIVATE_MAGIC); free(p->user); p->user = NULL; if(p->acls) { pcmk__free_acls(p->acls); p->acls = NULL; } if(p->deleted_objs) { g_list_free_full(p->deleted_objs, free_deleted_object); p->deleted_objs = NULL; } } } // Free all private data associated with an XML node static void free_private_data(xmlNode *node) { /* need to explicitly avoid our custom _private field cleanup when called from internal XSLT cleanup (xsltApplyStylesheetInternal -> xsltFreeTransformContext -> xsltFreeRVTs -> xmlFreeDoc) onto result tree fragments, represented as standalone documents with otherwise infeasible space-prefixed name (xsltInternals.h: 
XSLT_MARK_RES_TREE_FRAG) and carrying it's own load at _private field -- later assert on the XML_PRIVATE_MAGIC would explode */ if (node->type != XML_DOCUMENT_NODE || node->name == NULL || node->name[0] != ' ') { reset_xml_private_data(node->_private); free(node->_private); } } // Allocate and initialize private data for an XML node static void new_private_data(xmlNode *node) { xml_private_t *p = NULL; switch(node->type) { case XML_ELEMENT_NODE: case XML_DOCUMENT_NODE: case XML_ATTRIBUTE_NODE: case XML_COMMENT_NODE: p = calloc(1, sizeof(xml_private_t)); p->check = XML_PRIVATE_MAGIC; /* Flags will be reset if necessary when tracking is enabled */ pcmk__set_xml_flags(p, xpf_dirty|xpf_created); node->_private = p; break; case XML_TEXT_NODE: case XML_DTD_NODE: case XML_CDATA_SECTION_NODE: break; default: /* Ignore */ crm_trace("Ignoring %p %d", node, node->type); CRM_LOG_ASSERT(node->type == XML_ELEMENT_NODE); break; } if(p && pcmk__tracking_xml_changes(node, FALSE)) { /* XML_ELEMENT_NODE doesn't get picked up here, node->doc is * not hooked up at the point we are called */ mark_xml_node_dirty(node); } } void xml_track_changes(xmlNode * xml, const char *user, xmlNode *acl_source, bool enforce_acls) { xml_accept_changes(xml); crm_trace("Tracking changes%s to %p", enforce_acls?" with ACLs":"", xml); pcmk__set_xml_doc_flag(xml, xpf_tracking); if(enforce_acls) { if(acl_source == NULL) { acl_source = xml; } pcmk__set_xml_doc_flag(xml, xpf_acl_enabled); pcmk__unpack_acl(acl_source, xml, user); pcmk__apply_acl(xml); } } bool xml_tracking_changes(xmlNode * xml) { return (xml != NULL) && (xml->doc != NULL) && (xml->doc->_private != NULL) && pcmk_is_set(((xml_private_t *)(xml->doc->_private))->flags, xpf_tracking); } bool xml_document_dirty(xmlNode *xml) { return (xml != NULL) && (xml->doc != NULL) && (xml->doc->_private != NULL) && pcmk_is_set(((xml_private_t *)(xml->doc->_private))->flags, xpf_dirty); } /*! * \internal * \brief Return ordinal position of an XML node among its siblings * * \param[in] xml XML node to check * \param[in] ignore_if_set Don't count siblings with this flag set * * \return Ordinal position of \p xml (starting with 0) */ int pcmk__xml_position(xmlNode *xml, enum xml_private_flags ignore_if_set) { int position = 0; xmlNode *cIter = NULL; for(cIter = xml; cIter->prev; cIter = cIter->prev) { xml_private_t *p = ((xmlNode*)cIter->prev)->_private; if (!pcmk_is_set(p->flags, ignore_if_set)) { position++; } } return position; } // This also clears attribute's flags if not marked as deleted static bool marked_as_deleted(xmlAttrPtr a, void *user_data) { xml_private_t *p = a->_private; if (pcmk_is_set(p->flags, xpf_deleted)) { return true; } p->flags = xpf_none; return false; } // Remove all attributes marked as deleted from an XML node static void accept_attr_deletions(xmlNode *xml) { // Clear XML node's flags ((xml_private_t *) xml->_private)->flags = xpf_none; // Remove this XML node's attributes that were marked as deleted pcmk__xe_remove_matching_attrs(xml, marked_as_deleted, NULL); // Recursively do the same for this XML node's children for (xmlNodePtr cIter = pcmk__xml_first_child(xml); cIter != NULL; cIter = pcmk__xml_next(cIter)) { accept_attr_deletions(cIter); } } /*! 
* \internal * \brief Find first child XML node matching another given XML node * * \param[in] haystack XML whose children should be checked * \param[in] needle XML to match (comment content or element name and ID) * \param[in] exact If true and needle is a comment, position must match */ xmlNode * pcmk__xml_match(xmlNode *haystack, xmlNode *needle, bool exact) { CRM_CHECK(needle != NULL, return NULL); if (needle->type == XML_COMMENT_NODE) { return pcmk__xc_match(haystack, needle, exact); } else { const char *id = ID(needle); const char *attr = (id == NULL)? NULL : XML_ATTR_ID; return pcmk__xe_match(haystack, crm_element_name(needle), attr, id); } } void xml_log_changes(uint8_t log_level, const char *function, xmlNode * xml) { GListPtr gIter = NULL; xml_private_t *doc = NULL; if (log_level == LOG_NEVER) { return; } CRM_ASSERT(xml); CRM_ASSERT(xml->doc); doc = xml->doc->_private; if (!pcmk_is_set(doc->flags, xpf_dirty)) { return; } for(gIter = doc->deleted_objs; gIter; gIter = gIter->next) { pcmk__deleted_xml_t *deleted_obj = gIter->data; if (deleted_obj->position >= 0) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s (%d)", deleted_obj->path, deleted_obj->position); } else { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s", deleted_obj->path); } } log_data_element(log_level, __FILE__, function, __LINE__, "+ ", xml, 0, xml_log_option_formatted|xml_log_option_dirty_add); } void xml_accept_changes(xmlNode * xml) { xmlNode *top = NULL; xml_private_t *doc = NULL; if(xml == NULL) { return; } crm_trace("Accepting changes to %p", xml); doc = xml->doc->_private; top = xmlDocGetRootElement(xml->doc); reset_xml_private_data(xml->doc->_private); if (!pcmk_is_set(doc->flags, xpf_dirty)) { doc->flags = xpf_none; return; } doc->flags = xpf_none; accept_attr_deletions(top); } xmlNode * find_xml_node(xmlNode * root, const char *search_path, gboolean must_find) { xmlNode *a_child = NULL; const char *name = "NULL"; if (root != NULL) { name = crm_element_name(root); } if (search_path == NULL) { crm_warn("Will never find "); return NULL; } for (a_child = pcmk__xml_first_child(root); a_child != NULL; a_child = pcmk__xml_next(a_child)) { if (strcmp((const char *)a_child->name, search_path) == 0) { /* crm_trace("returning node (%s).", crm_element_name(a_child)); */ return a_child; } } if (must_find) { crm_warn("Could not find %s in %s.", search_path, name); } else if (root != NULL) { crm_trace("Could not find %s in %s.", search_path, name); } else { crm_trace("Could not find %s in .", search_path); } return NULL; } #define attr_matches(c, n, v) pcmk__str_eq(crm_element_value((c), (n)), \ (v), pcmk__str_none) /*! 
* \internal * \brief Find first XML child element matching given criteria * * \param[in] parent XML element to search * \param[in] node_name If not NULL, only match children of this type * \param[in] attr_n If not NULL, only match children with an attribute * of this name and a value of \p attr_v * \param[in] attr_v If \p attr_n and this are not NULL, only match children * with an attribute named \p attr_n and this value * * \return Matching XML child element, or NULL if none found */ xmlNode * pcmk__xe_match(xmlNode *parent, const char *node_name, const char *attr_n, const char *attr_v) { /* ensure attr_v specified when attr_n is */ CRM_CHECK(attr_n == NULL || attr_v != NULL, return NULL); for (xmlNode *child = pcmk__xml_first_child(parent); child != NULL; child = pcmk__xml_next(child)) { if (pcmk__str_eq(node_name, (const char *) (child->name), pcmk__str_null_matches) && ((attr_n == NULL) || attr_matches(child, attr_n, attr_v))) { return child; } } crm_trace("XML child node <%s%s%s%s%s> not found in %s", (node_name? node_name : "(any)"), (attr_n? " " : ""), (attr_n? attr_n : ""), (attr_n? "=" : ""), (attr_n? attr_v : ""), crm_element_name(parent)); return NULL; } void copy_in_properties(xmlNode * target, xmlNode * src) { if (src == NULL) { crm_warn("No node to copy properties from"); } else if (target == NULL) { crm_err("No node to copy properties into"); } else { for (xmlAttrPtr a = pcmk__xe_first_attr(src); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); expand_plus_plus(target, p_name, p_value); } } return; } void fix_plus_plus_recursive(xmlNode * target) { /* TODO: Remove recursion and use xpath searches for value++ */ xmlNode *child = NULL; for (xmlAttrPtr a = pcmk__xe_first_attr(target); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); expand_plus_plus(target, p_name, p_value); } for (child = pcmk__xml_first_child(target); child != NULL; child = pcmk__xml_next(child)) { fix_plus_plus_recursive(child); } } void expand_plus_plus(xmlNode * target, const char *name, const char *value) { int offset = 1; int name_len = 0; int int_value = 0; int value_len = 0; const char *old_value = NULL; if (value == NULL || name == NULL) { return; } old_value = crm_element_value(target, name); if (old_value == NULL) { /* if no previous value, set unexpanded */ goto set_unexpanded; } else if (strstr(value, name) != value) { goto set_unexpanded; } name_len = strlen(name); value_len = strlen(value); if (value_len < (name_len + 2) || value[name_len] != '+' || (value[name_len + 1] != '+' && value[name_len + 1] != '=')) { goto set_unexpanded; } /* if we are expanding ourselves, * then no previous value was set and leave int_value as 0 */ if (old_value != value) { int_value = char2score(old_value); } if (value[name_len + 1] != '+') { const char *offset_s = value + (name_len + 2); offset = char2score(offset_s); } int_value += offset; if (int_value > INFINITY) { int_value = (int)INFINITY; } crm_xml_add_int(target, name, int_value); return; set_unexpanded: if (old_value == value) { /* the old value is already set, nothing to do */ return; } crm_xml_add(target, name, value); return; } /*! 
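 *
 * [Hypothetical usage of expand_plus_plus() above; the attribute name and
 * values are invented for illustration, and this block is not part of the
 * original change.]
 */
#if 0
static void plus_plus_sketch(xmlNode *node)
{
    crm_xml_add(node, "score", "5");

    /* "score++" bumps the existing value by 1, yielding "6" */
    expand_plus_plus(node, "score", "score++");

    /* "score+=10" adds 10, yielding "16" */
    expand_plus_plus(node, "score", "score+=10");
}
#endif

/*!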
* \internal * \brief Remove an XML element's attributes that match some criteria * * \param[in,out] element XML element to modify * \param[in] match If not NULL, only remove attributes for which * this function returns true * \param[in] user_data Data to pass to \p match */ void pcmk__xe_remove_matching_attrs(xmlNode *element, bool (*match)(xmlAttrPtr, void *), void *user_data) { xmlAttrPtr next = NULL; for (xmlAttrPtr a = pcmk__xe_first_attr(element); a != NULL; a = next) { next = a->next; // Grab now because attribute might get removed if ((match == NULL) || match(a, user_data)) { if (!pcmk__check_acl(element, NULL, xpf_acl_write)) { crm_trace("ACLs prevent removal of attributes (%s and " "possibly others) from %s element", (const char *) a->name, (const char *) element->name); return; // ACLs apply to element, not particular attributes } if (pcmk__tracking_xml_changes(element, false)) { // Leave (marked for removal) until after diff is calculated set_parent_flag(element, xpf_dirty); pcmk__set_xml_flags((xml_private_t *) a->_private, xpf_deleted); } else { xmlRemoveProp(a); } } } } xmlDoc * getDocPtr(xmlNode * node) { xmlDoc *doc = NULL; CRM_CHECK(node != NULL, return NULL); doc = node->doc; if (doc == NULL) { doc = xmlNewDoc((pcmkXmlStr) "1.0"); xmlDocSetRootElement(doc, node); xmlSetTreeDoc(node, doc); } return doc; } xmlNode * add_node_copy(xmlNode * parent, xmlNode * src_node) { xmlNode *child = NULL; xmlDoc *doc = getDocPtr(parent); CRM_CHECK(src_node != NULL, return NULL); child = xmlDocCopyNode(src_node, doc, 1); xmlAddChild(parent, child); pcmk__mark_xml_created(child); return child; } int add_node_nocopy(xmlNode * parent, const char *name, xmlNode * child) { add_node_copy(parent, child); free_xml(child); return 1; } xmlNode * create_xml_node(xmlNode * parent, const char *name) { xmlDoc *doc = NULL; xmlNode *node = NULL; if (pcmk__str_empty(name)) { CRM_CHECK(name != NULL && name[0] == 0, return NULL); return NULL; } if (parent == NULL) { doc = xmlNewDoc((pcmkXmlStr) "1.0"); node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL); xmlDocSetRootElement(doc, node); } else { doc = getDocPtr(parent); node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL); xmlAddChild(parent, node); } pcmk__mark_xml_created(node); return node; } xmlNode * pcmk_create_xml_text_node(xmlNode * parent, const char *name, const char *content) { xmlNode *node = create_xml_node(parent, name); if (node != NULL) { xmlNodeSetContent(node, (pcmkXmlStr) content); } return node; } xmlNode * pcmk_create_html_node(xmlNode * parent, const char *element_name, const char *id, const char *class_name, const char *text) { xmlNode *node = pcmk_create_xml_text_node(parent, element_name, text); if (class_name != NULL) { crm_xml_add(node, "class", class_name); } if (id != NULL) { crm_xml_add(node, "id", id); } return node; } /*! 
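 *
 * [Illustrative sketch, not from the original diff: constructing a small
 * tree with the creation helpers above. The element and attribute names are
 * made up.]
 */
#if 0
static xmlNode *build_tree_sketch(void)
{
    xmlNode *root = create_xml_node(NULL, "status");      /* new document + root */
    xmlNode *child = create_xml_node(root, "node_state"); /* child element */

    crm_xml_add(child, "id", "1");
    pcmk_create_xml_text_node(child, "note", "example text");
    return root; /* the caller would eventually free_xml(root) */
}
#endif

/*!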
* Free an XML element and all of its children, removing it from its parent * * \param[in] xml XML element to free */ void pcmk_free_xml_subtree(xmlNode *xml) { xmlUnlinkNode(xml); // Detaches from parent and siblings xmlFreeNode(xml); // Frees } static void free_xml_with_position(xmlNode * child, int position) { if (child != NULL) { xmlNode *top = NULL; xmlDoc *doc = child->doc; xml_private_t *p = child->_private; if (doc != NULL) { top = xmlDocGetRootElement(doc); } if (doc != NULL && top == child) { /* Free everything */ xmlFreeDoc(doc); } else if (pcmk__check_acl(child, NULL, xpf_acl_write) == FALSE) { int offset = 0; char buffer[PCMK__BUFFER_SIZE]; pcmk__element_xpath(NULL, child, buffer, offset, sizeof(buffer)); crm_trace("Cannot remove %s %x", buffer, p->flags); return; } else { if (doc && pcmk__tracking_xml_changes(child, FALSE) && !pcmk_is_set(p->flags, xpf_created)) { int offset = 0; char buffer[PCMK__BUFFER_SIZE]; if (pcmk__element_xpath(NULL, child, buffer, offset, sizeof(buffer)) > 0) { pcmk__deleted_xml_t *deleted_obj = NULL; crm_trace("Deleting %s %p from %p", buffer, child, doc); deleted_obj = calloc(1, sizeof(pcmk__deleted_xml_t)); deleted_obj->path = strdup(buffer); deleted_obj->position = -1; /* Record the "position" only for XML comments for now */ if (child->type == XML_COMMENT_NODE) { if (position >= 0) { deleted_obj->position = position; } else { deleted_obj->position = pcmk__xml_position(child, xpf_skip); } } p = doc->_private; p->deleted_objs = g_list_append(p->deleted_objs, deleted_obj); pcmk__set_xml_doc_flag(child, xpf_dirty); } } pcmk_free_xml_subtree(child); } } } void free_xml(xmlNode * child) { free_xml_with_position(child, -1); } xmlNode * copy_xml(xmlNode * src) { xmlDoc *doc = xmlNewDoc((pcmkXmlStr) "1.0"); xmlNode *copy = xmlDocCopyNode(src, doc, 1); xmlDocSetRootElement(doc, copy); xmlSetTreeDoc(copy, doc); return copy; } static void log_xmllib_err(void *ctx, const char *fmt, ...) G_GNUC_PRINTF(2, 3); // Log an XML library error static void log_xmllib_err(void *ctx, const char *fmt, ...) 
{ va_list ap; static struct qb_log_callsite *xml_error_cs = NULL; if (xml_error_cs == NULL) { xml_error_cs = qb_log_callsite_get( __func__, __FILE__, "xml library error", LOG_TRACE, __LINE__, crm_trace_nonlog); } va_start(ap, fmt); if (xml_error_cs && xml_error_cs->targets) { PCMK__XML_LOG_BASE(LOG_ERR, TRUE, crm_abort(__FILE__, __PRETTY_FUNCTION__, __LINE__, "xml library error", TRUE, TRUE), "XML Error: ", fmt, ap); } else { PCMK__XML_LOG_BASE(LOG_ERR, TRUE, 0, "XML Error: ", fmt, ap); } va_end(ap); } xmlNode * string2xml(const char *input) { xmlNode *xml = NULL; xmlDocPtr output = NULL; xmlParserCtxtPtr ctxt = NULL; xmlErrorPtr last_error = NULL; if (input == NULL) { crm_err("Can't parse NULL input"); return NULL; } /* create a parser context */ ctxt = xmlNewParserCtxt(); CRM_CHECK(ctxt != NULL, return NULL); xmlCtxtResetLastError(ctxt); xmlSetGenericErrorFunc(ctxt, log_xmllib_err); output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL, PCMK__XML_PARSE_OPTS); if (output) { xml = xmlDocGetRootElement(output); } last_error = xmlCtxtGetLastError(ctxt); if (last_error && last_error->code != XML_ERR_OK) { /* crm_abort(__FILE__,__func__,__LINE__, "last_error->code != XML_ERR_OK", TRUE, TRUE); */ /* * http://xmlsoft.org/html/libxml-xmlerror.html#xmlErrorLevel * http://xmlsoft.org/html/libxml-xmlerror.html#xmlParserErrors */ crm_warn("Parsing failed (domain=%d, level=%d, code=%d): %s", last_error->domain, last_error->level, last_error->code, last_error->message); if (last_error->code == XML_ERR_DOCUMENT_EMPTY) { CRM_LOG_ASSERT("Cannot parse an empty string"); } else if (last_error->code != XML_ERR_DOCUMENT_END) { crm_err("Couldn't%s parse %d chars: %s", xml ? " fully" : "", (int)strlen(input), input); if (xml != NULL) { crm_log_xml_err(xml, "Partial"); } } else { int len = strlen(input); int lpc = 0; while(lpc < len) { crm_warn("Parse error[+%.3d]: %.80s", lpc, input+lpc); lpc += 80; } CRM_LOG_ASSERT("String parsing error"); } } xmlFreeParserCtxt(ctxt); return xml; } xmlNode * stdin2xml(void) { size_t data_length = 0; size_t read_chars = 0; char *xml_buffer = NULL; xmlNode *xml_obj = NULL; do { xml_buffer = pcmk__realloc(xml_buffer, data_length + PCMK__BUFFER_SIZE); read_chars = fread(xml_buffer + data_length, 1, PCMK__BUFFER_SIZE, stdin); data_length += read_chars; } while (read_chars == PCMK__BUFFER_SIZE); if (data_length == 0) { crm_warn("No XML supplied on stdin"); free(xml_buffer); return NULL; } xml_buffer[data_length] = '\0'; xml_obj = string2xml(xml_buffer); free(xml_buffer); crm_log_xml_trace(xml_obj, "Created fragment"); return xml_obj; } static char * decompress_file(const char *filename) { char *buffer = NULL; - -#if HAVE_BZLIB_H int rc = 0; size_t length = 0, read_len = 0; - BZFILE *bz_file = NULL; FILE *input = fopen(filename, "r"); if (input == NULL) { crm_perror(LOG_ERR, "Could not open %s for reading", filename); return NULL; } bz_file = BZ2_bzReadOpen(&rc, input, 0, 0, NULL, 0); if (rc != BZ_OK) { crm_err("Could not prepare to read compressed %s: %s " CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc); BZ2_bzReadClose(&rc, bz_file); return NULL; } rc = BZ_OK; // cppcheck seems not to understand the abort-logic in pcmk__realloc // cppcheck-suppress memleak while (rc == BZ_OK) { buffer = pcmk__realloc(buffer, PCMK__BUFFER_SIZE + length + 1); read_len = BZ2_bzRead(&rc, bz_file, buffer + length, PCMK__BUFFER_SIZE); crm_trace("Read %ld bytes from file: %d", (long)read_len, rc); if (rc == BZ_OK || rc == BZ_STREAM_END) { length += read_len; } } buffer[length] = '\0'; if (rc 
!= BZ_STREAM_END) { crm_err("Could not read compressed %s: %s " CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc); free(buffer); buffer = NULL; } BZ2_bzReadClose(&rc, bz_file); fclose(input); - -#else - crm_err("Could not read compressed %s: not built with bzlib support", - filename); -#endif return buffer; } /*! * \internal * \brief Remove XML text nodes from specified XML and all its children * * \param[in,out] xml XML to strip text from */ void pcmk__strip_xml_text(xmlNode *xml) { xmlNode *iter = xml->children; while (iter) { xmlNode *next = iter->next; switch (iter->type) { case XML_TEXT_NODE: /* Remove it */ pcmk_free_xml_subtree(iter); break; case XML_ELEMENT_NODE: /* Search it */ pcmk__strip_xml_text(iter); break; default: /* Leave it */ break; } iter = next; } } xmlNode * filename2xml(const char *filename) { xmlNode *xml = NULL; xmlDocPtr output = NULL; gboolean uncompressed = TRUE; xmlParserCtxtPtr ctxt = NULL; xmlErrorPtr last_error = NULL; /* create a parser context */ ctxt = xmlNewParserCtxt(); CRM_CHECK(ctxt != NULL, return NULL); xmlCtxtResetLastError(ctxt); xmlSetGenericErrorFunc(ctxt, log_xmllib_err); if (filename) { uncompressed = !pcmk__ends_with_ext(filename, ".bz2"); } if (filename == NULL) { /* STDIN_FILENO == fileno(stdin) */ output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL, PCMK__XML_PARSE_OPTS); } else if (uncompressed) { output = xmlCtxtReadFile(ctxt, filename, NULL, PCMK__XML_PARSE_OPTS); } else { char *input = decompress_file(filename); output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL, PCMK__XML_PARSE_OPTS); free(input); } if (output && (xml = xmlDocGetRootElement(output))) { pcmk__strip_xml_text(xml); } last_error = xmlCtxtGetLastError(ctxt); if (last_error && last_error->code != XML_ERR_OK) { /* crm_abort(__FILE__,__func__,__LINE__, "last_error->code != XML_ERR_OK", TRUE, TRUE); */ /* * http://xmlsoft.org/html/libxml-xmlerror.html#xmlErrorLevel * http://xmlsoft.org/html/libxml-xmlerror.html#xmlParserErrors */ crm_err("Parsing failed (domain=%d, level=%d, code=%d): %s", last_error->domain, last_error->level, last_error->code, last_error->message); if (last_error && last_error->code != XML_ERR_OK) { crm_err("Couldn't%s parse %s", xml ? " fully" : "", filename); if (xml != NULL) { crm_log_xml_err(xml, "Partial"); } } } xmlFreeParserCtxt(ctxt); return xml; } /*! * \internal * \brief Add a "last written" attribute to an XML element, set to current time * * \param[in] xe XML element to add attribute to * * \return Value that was set, or NULL on error */ const char * pcmk__xe_add_last_written(xmlNode *xe) { const char *now_str = pcmk__epoch2str(NULL); return crm_xml_add(xe, XML_CIB_ATTR_WRITTEN, now_str ? now_str : "Could not determine current time"); } /*! * \brief Sanitize a string so it is usable as an XML ID * * \param[in,out] id String to sanitize */ void crm_xml_sanitize_id(char *id) { char *c; for (c = id; *c; ++c) { /* @TODO Sanitize more comprehensively */ switch (*c) { case ':': case '#': *c = '.'; } } } /*! * \brief Set the ID of an XML element using a format * * \param[in,out] xml XML element * \param[in] fmt printf-style format * \param[in] ... any arguments required by format */ void crm_xml_set_id(xmlNode *xml, const char *format, ...) { va_list ap; int len = 0; char *id = NULL; /* equivalent to crm_strdup_printf() */ va_start(ap, format); len = vasprintf(&id, format, ap); va_end(ap); CRM_ASSERT(len > 0); crm_xml_sanitize_id(id); crm_xml_add(xml, XML_ATTR_ID, id); free(id); } /*! 
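 *
 * [Illustrative only, not part of the original change: parsing a string into
 * XML and re-labelling it with the ID helpers above. The XML snippet is
 * hypothetical.]
 */
#if 0
static void relabel_sketch(void)
{
    xmlNode *xml = string2xml("<resource id='dummy:0'/>");

    if (xml != NULL) {
        /* crm_xml_set_id() formats the ID and runs crm_xml_sanitize_id(),
         * so "dummy:1" is stored as "dummy.1" */
        crm_xml_set_id(xml, "%s:%d", "dummy", 1);
        free_xml(xml);
    }
}
#endif

/*!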
* \internal * \brief Write XML to a file stream * * \param[in] xml_node XML to write * \param[in] filename Name of file being written (for logging only) * \param[in] stream Open file stream corresponding to filename * \param[in] compress Whether to compress XML before writing * \param[out] nbytes Number of bytes written * * \return Standard Pacemaker return code */ static int write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream, bool compress, unsigned int *nbytes) { int rc = pcmk_rc_ok; char *buffer = NULL; *nbytes = 0; crm_log_xml_trace(xml_node, "writing"); buffer = dump_xml_formatted(xml_node); CRM_CHECK(buffer && strlen(buffer), crm_log_xml_warn(xml_node, "formatting failed"); rc = pcmk_rc_error; goto bail); if (compress) { -#if HAVE_BZLIB_H unsigned int in = 0; BZFILE *bz_file = NULL; rc = BZ_OK; bz_file = BZ2_bzWriteOpen(&rc, stream, 5, 0, 30); if (rc != BZ_OK) { crm_warn("Not compressing %s: could not prepare file stream: %s " CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc); } else { BZ2_bzWrite(&rc, bz_file, buffer, strlen(buffer)); if (rc != BZ_OK) { crm_warn("Not compressing %s: could not compress data: %s " CRM_XS " bzerror=%d errno=%d", filename, bz2_strerror(rc), rc, errno); } } if (rc == BZ_OK) { BZ2_bzWriteClose(&rc, bz_file, 0, &in, nbytes); if (rc != BZ_OK) { crm_warn("Not compressing %s: could not write compressed data: %s " CRM_XS " bzerror=%d errno=%d", filename, bz2_strerror(rc), rc, errno); *nbytes = 0; // retry without compression } else { crm_trace("Compressed XML for %s from %u bytes to %u", filename, in, *nbytes); } } rc = pcmk_rc_ok; // Either true, or we'll retry without compression -#else - crm_warn("Not compressing %s: not built with bzlib support", filename); -#endif } if (*nbytes == 0) { rc = fprintf(stream, "%s", buffer); if (rc < 0) { rc = errno; crm_perror(LOG_ERR, "writing %s", filename); } else { *nbytes = (unsigned int) rc; rc = pcmk_rc_ok; } } bail: if (fflush(stream) != 0) { rc = errno; crm_perror(LOG_ERR, "flushing %s", filename); } /* Don't report error if the file does not support synchronization */ if (fsync(fileno(stream)) < 0 && errno != EROFS && errno != EINVAL) { rc = errno; crm_perror(LOG_ERR, "synchronizing %s", filename); } fclose(stream); crm_trace("Saved %d bytes to %s as XML", *nbytes, filename); free(buffer); return rc; } /*! * \brief Write XML to a file descriptor * * \param[in] xml_node XML to write * \param[in] filename Name of file being written (for logging only) * \param[in] fd Open file descriptor corresponding to filename * \param[in] compress Whether to compress XML before writing * * \return Number of bytes written on success, -errno otherwise */ int write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress) { FILE *stream = NULL; unsigned int nbytes = 0; int rc = pcmk_rc_ok; CRM_CHECK(xml_node && (fd > 0), return -EINVAL); stream = fdopen(fd, "w"); if (stream == NULL) { return -errno; } rc = write_xml_stream(xml_node, filename, stream, compress, &nbytes); if (rc != pcmk_rc_ok) { return pcmk_rc2legacy(rc); } return (int) nbytes; } /*! 
* \brief Write XML to a file * * \param[in] xml_node XML to write * \param[in] filename Name of file to write * \param[in] compress Whether to compress XML before writing * * \return Number of bytes written on success, -errno otherwise */ int write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress) { FILE *stream = NULL; unsigned int nbytes = 0; int rc = pcmk_rc_ok; CRM_CHECK(xml_node && filename, return -EINVAL); stream = fopen(filename, "w"); if (stream == NULL) { return -errno; } rc = write_xml_stream(xml_node, filename, stream, compress, &nbytes); if (rc != pcmk_rc_ok) { return pcmk_rc2legacy(rc); } return (int) nbytes; } // Replace a portion of a dynamically allocated string (reallocating memory) static char * replace_text(char *text, int start, int *length, const char *replace) { int lpc; int offset = strlen(replace) - 1; /* We have space for 1 char already */ *length += offset; text = pcmk__realloc(text, *length); for (lpc = (*length) - 1; lpc > (start + offset); lpc--) { text[lpc] = text[lpc - offset]; } memcpy(text + start, replace, offset + 1); return text; } char * crm_xml_escape(const char *text) { int index; int changes = 0; int length = 1 + strlen(text); char *copy = strdup(text); /* * When xmlCtxtReadDoc() parses < and friends in a * value, it converts them to their human readable * form. * * If one uses xmlNodeDump() to convert it back to a * string, all is well, because special characters are * converted back to their escape sequences. * * However xmlNodeDump() is randomly dog slow, even with the same * input. So we need to replicate the escaping in our custom * version so that the result can be re-parsed by xmlCtxtReadDoc() * when necessary. */ for (index = 0; index < length; index++) { switch (copy[index]) { case 0: break; case '<': copy = replace_text(copy, index, &length, "<"); changes++; break; case '>': copy = replace_text(copy, index, &length, ">"); changes++; break; case '"': copy = replace_text(copy, index, &length, """); changes++; break; case '\'': copy = replace_text(copy, index, &length, "'"); changes++; break; case '&': copy = replace_text(copy, index, &length, "&"); changes++; break; case '\t': /* Might as well just expand to a few spaces... */ copy = replace_text(copy, index, &length, " "); changes++; break; case '\n': /* crm_trace("Convert: \\%.3o", copy[index]); */ copy = replace_text(copy, index, &length, "\\n"); changes++; break; case '\r': copy = replace_text(copy, index, &length, "\\r"); changes++; break; /* For debugging... 
case '\\': crm_trace("Passthrough: \\%c", copy[index+1]); break; */ default: /* Check for and replace non-printing characters with their octal equivalent */ if(copy[index] < ' ' || copy[index] > '~') { char *replace = crm_strdup_printf("\\%.3o", copy[index]); /* crm_trace("Convert to octal: \\%.3o", copy[index]); */ copy = replace_text(copy, index, &length, replace); free(replace); changes++; } } } if (changes) { crm_trace("Dumped '%s'", copy); } return copy; } static inline void dump_xml_attr(xmlAttrPtr attr, int options, char **buffer, int *offset, int *max) { char *p_value = NULL; const char *p_name = NULL; xml_private_t *p = NULL; CRM_ASSERT(buffer != NULL); if (attr == NULL || attr->children == NULL) { return; } p = attr->_private; if (p && pcmk_is_set(p->flags, xpf_deleted)) { return; } p_name = (const char *)attr->name; p_value = crm_xml_escape((const char *)attr->children->content); buffer_print(*buffer, *max, *offset, " %s=\"%s\"", p_name, p_value); free(p_value); } // Log an XML element (and any children) in a formatted way void pcmk__xe_log(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode *data, int depth, int options) { int max = 0; int offset = 0; const char *name = NULL; const char *hidden = NULL; xmlNode *child = NULL; if ((data == NULL) || (log_level == LOG_NEVER)) { return; } name = crm_element_name(data); if (pcmk_is_set(options, xml_log_option_open)) { char *buffer = NULL; insert_prefix(options, &buffer, &offset, &max, depth); if (data->type == XML_COMMENT_NODE) { buffer_print(buffer, max, offset, "", data->content); } else { buffer_print(buffer, max, offset, "<%s", name); hidden = crm_element_value(data, "hidden"); for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { xml_private_t *p = a->_private; const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); char *p_copy = NULL; if (pcmk_is_set(p->flags, xpf_deleted)) { continue; } else if (pcmk_any_flags_set(options, xml_log_option_diff_plus |xml_log_option_diff_minus) && (strcmp(XML_DIFF_MARKER, p_name) == 0)) { continue; } else if (hidden != NULL && p_name[0] != 0 && strstr(hidden, p_name) != NULL) { p_copy = strdup("*****"); } else { p_copy = crm_xml_escape(p_value); } buffer_print(buffer, max, offset, " %s=\"%s\"", p_name, p_copy); free(p_copy); } if(xml_has_children(data) == FALSE) { buffer_print(buffer, max, offset, "/>"); } else if (pcmk_is_set(options, xml_log_option_children)) { buffer_print(buffer, max, offset, ">"); } else { buffer_print(buffer, max, offset, "/>"); } } do_crm_log_alias(log_level, file, function, line, "%s %s", prefix, buffer); free(buffer); } if(data->type == XML_COMMENT_NODE) { return; } else if(xml_has_children(data) == FALSE) { return; } else if (pcmk_is_set(options, xml_log_option_children)) { offset = 0; max = 0; for (child = pcmk__xml_first_child(data); child != NULL; child = pcmk__xml_next(child)) { pcmk__xe_log(log_level, file, function, line, prefix, child, depth + 1, options|xml_log_option_open|xml_log_option_close); } } if (pcmk_is_set(options, xml_log_option_close)) { char *buffer = NULL; insert_prefix(options, &buffer, &offset, &max, depth); buffer_print(buffer, max, offset, "", name); do_crm_log_alias(log_level, file, function, line, "%s %s", prefix, buffer); free(buffer); } } // Log XML portions that have been marked as changed static void log_xml_changes(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode *data, int depth, int options) { 
xml_private_t *p; char *prefix_m = NULL; xmlNode *child = NULL; if ((data == NULL) || (log_level == LOG_NEVER)) { return; } p = data->_private; prefix_m = strdup(prefix); prefix_m[1] = '+'; if (pcmk_all_flags_set(p->flags, xpf_dirty|xpf_created)) { /* Continue and log full subtree */ pcmk__xe_log(log_level, file, function, line, prefix_m, data, depth, options|xml_log_option_open|xml_log_option_close |xml_log_option_children); } else if (pcmk_is_set(p->flags, xpf_dirty)) { char *spaces = calloc(80, 1); int s_count = 0, s_max = 80; char *prefix_del = NULL; char *prefix_moved = NULL; const char *flags = prefix; insert_prefix(options, &spaces, &s_count, &s_max, depth); prefix_del = strdup(prefix); prefix_del[0] = '-'; prefix_del[1] = '-'; prefix_moved = strdup(prefix); prefix_moved[1] = '~'; if (pcmk_is_set(p->flags, xpf_moved)) { flags = prefix_moved; } else { flags = prefix; } pcmk__xe_log(log_level, file, function, line, flags, data, depth, options|xml_log_option_open); for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { const char *aname = (const char*) a->name; p = a->_private; if (pcmk_is_set(p->flags, xpf_deleted)) { const char *value = crm_element_value(data, aname); flags = prefix_del; do_crm_log_alias(log_level, file, function, line, "%s %s @%s=%s", flags, spaces, aname, value); } else if (pcmk_is_set(p->flags, xpf_dirty)) { const char *value = crm_element_value(data, aname); if (pcmk_is_set(p->flags, xpf_created)) { flags = prefix_m; } else if (pcmk_is_set(p->flags, xpf_modified)) { flags = prefix; } else if (pcmk_is_set(p->flags, xpf_moved)) { flags = prefix_moved; } else { flags = prefix; } do_crm_log_alias(log_level, file, function, line, "%s %s @%s=%s", flags, spaces, aname, value); } } free(prefix_moved); free(prefix_del); free(spaces); for (child = pcmk__xml_first_child(data); child != NULL; child = pcmk__xml_next(child)) { log_xml_changes(log_level, file, function, line, prefix, child, depth + 1, options); } pcmk__xe_log(log_level, file, function, line, prefix, data, depth, options|xml_log_option_close); } else { for (child = pcmk__xml_first_child(data); child != NULL; child = pcmk__xml_next(child)) { log_xml_changes(log_level, file, function, line, prefix, child, depth + 1, options); } } free(prefix_m); } void log_data_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options) { xmlNode *a_child = NULL; char *prefix_m = NULL; if (log_level == LOG_NEVER) { return; } if (prefix == NULL) { prefix = ""; } /* Since we use the same file and line, to avoid confusing libqb, we need to use the same format strings */ if (data == NULL) { do_crm_log_alias(log_level, file, function, line, "%s: %s", prefix, "No data to dump as XML"); return; } if (pcmk_is_set(options, xml_log_option_dirty_add)) { log_xml_changes(log_level, file, function, line, prefix, data, depth, options); return; } if (pcmk_is_set(options, xml_log_option_formatted)) { if (pcmk_is_set(options, xml_log_option_diff_plus) && (data->children == NULL || crm_element_value(data, XML_DIFF_MARKER))) { options |= xml_log_option_diff_all; prefix_m = strdup(prefix); prefix_m[1] = '+'; prefix = prefix_m; } else if (pcmk_is_set(options, xml_log_option_diff_minus) && (data->children == NULL || crm_element_value(data, XML_DIFF_MARKER))) { options |= xml_log_option_diff_all; prefix_m = strdup(prefix); prefix_m[1] = '-'; prefix = prefix_m; } } if (pcmk_is_set(options, xml_log_option_diff_short) && !pcmk_is_set(options, xml_log_option_diff_all)) 
{ /* Still searching for the actual change */ for (a_child = pcmk__xml_first_child(data); a_child != NULL; a_child = pcmk__xml_next(a_child)) { log_data_element(log_level, file, function, line, prefix, a_child, depth + 1, options); } } else { pcmk__xe_log(log_level, file, function, line, prefix, data, depth, options|xml_log_option_open|xml_log_option_close |xml_log_option_children); } free(prefix_m); } static void dump_filtered_xml(xmlNode * data, int options, char **buffer, int *offset, int *max) { for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { if (!pcmk__xa_filterable((const char *) (a->name))) { dump_xml_attr(a, options, buffer, offset, max); } } } static void dump_xml_element(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { const char *name = NULL; CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } name = crm_element_name(data); CRM_ASSERT(name != NULL); insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "<%s", name); if (options & xml_log_option_filtered) { dump_filtered_xml(data, options, buffer, offset, max); } else { for (xmlAttrPtr a = pcmk__xe_first_attr(data); a != NULL; a = a->next) { dump_xml_attr(a, options, buffer, offset, max); } } if (data->children == NULL) { buffer_print(*buffer, *max, *offset, "/>"); } else { buffer_print(*buffer, *max, *offset, ">"); } if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } if (data->children) { xmlNode *xChild = NULL; for(xChild = data->children; xChild != NULL; xChild = xChild->next) { pcmk__xml2text(xChild, options, buffer, offset, max, depth + 1); } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "", name); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } } static void dump_xml_text(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "%s", data->content); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } static void dump_xml_cdata(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "content); buffer_print(*buffer, *max, *offset, "]]>"); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } static void dump_xml_comment(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, ""); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } #define PCMK__XMLDUMP_STATS 0 /*! 
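For orientation, a minimal illustrative call into the logger defined above. Here cib_update is a hypothetical change-tracked XML node, LOG_DEBUG is the standard syslog level, and the two-character prefix follows the convention noted earlier; this is a sketch, not code from the patch.

/* Log only the changed (dirty) portions of a tracked XML tree */
log_data_element(LOG_DEBUG, __FILE__, __func__, __LINE__, "+ ",
                 cib_update, 0,
                 xml_log_option_formatted|xml_log_option_dirty_add);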
* \internal * \brief Create a text representation of an XML object * * \param[in] data XML to convert * \param[in] options Group of enum xml_log_options flags * \param[in,out] buffer Buffer to store text in (may be reallocated) * \param[in,out] offset Current position of null terminator within \p buffer * \param[in,out] max Current size of \p buffer in bytes * \param[in] depth Current indentation level */ void pcmk__xml2text(xmlNode *data, int options, char **buffer, int *offset, int *max, int depth) { if(data == NULL) { *offset = 0; *max = 0; return; } if (!pcmk_is_set(options, xml_log_option_filtered) && pcmk_is_set(options, xml_log_option_full_fledged)) { /* libxml's serialization reuse is a good idea, sadly we cannot apply it for the filtered cases (preceding filtering pass would preclude further reuse of such in-situ modified XML in generic context and is likely not a win performance-wise), and there's also a historically unstable throughput argument (likely stemming from memory allocation overhead, eventhough that shall be minimized with defaults preset in crm_xml_init) */ #if (PCMK__XMLDUMP_STATS - 0) time_t next, new = time(NULL); #endif xmlDoc *doc; xmlOutputBuffer *xml_buffer; doc = getDocPtr(data); /* doc will only be NULL if data is */ CRM_CHECK(doc != NULL, return); xml_buffer = xmlAllocOutputBuffer(NULL); CRM_ASSERT(xml_buffer != NULL); /* XXX we could setup custom allocation scheme for the particular buffer, but it's subsumed with crm_xml_init that needs to be invoked prior to entering this function as such, since its other branch vitally depends on it -- what can be done about this all is to have a facade parsing functions that would 100% mark entering libxml code for us, since we don't do anything as crazy as swapping out the binary form of the parsed tree (but those would need to be strictly used as opposed to libxml's raw functions) */ xmlNodeDumpOutput(xml_buffer, doc, data, 0, (options & xml_log_option_formatted), NULL); /* attempt adding final NL - failing shouldn't be fatal here */ (void) xmlOutputBufferWrite(xml_buffer, sizeof("\n") - 1, "\n"); if (xml_buffer->buffer != NULL) { buffer_print(*buffer, *max, *offset, "%s", (char *) xmlBufContent(xml_buffer->buffer)); } #if (PCMK__XMLDUMP_STATS - 0) next = time(NULL); if ((now + 1) < next) { crm_log_xml_trace(data, "Long time"); crm_err("xmlNodeDump() -> %dbytes took %ds", *max, next - now); } #endif /* asserted allocation before so there should be something to remove */ (void) xmlOutputBufferClose(xml_buffer); return; } switch(data->type) { case XML_ELEMENT_NODE: /* Handle below */ dump_xml_element(data, options, buffer, offset, max, depth); break; case XML_TEXT_NODE: /* if option xml_log_option_text is enabled, then dump XML_TEXT_NODE */ if (options & xml_log_option_text) { dump_xml_text(data, options, buffer, offset, max, depth); } return; case XML_COMMENT_NODE: dump_xml_comment(data, options, buffer, offset, max, depth); break; case XML_CDATA_SECTION_NODE: dump_xml_cdata(data, options, buffer, offset, max, depth); break; default: crm_warn("Unhandled type: %d", data->type); return; /* XML_ATTRIBUTE_NODE = 2 XML_ENTITY_REF_NODE = 5 XML_ENTITY_NODE = 6 XML_PI_NODE = 7 XML_DOCUMENT_NODE = 9 XML_DOCUMENT_TYPE_NODE = 10 XML_DOCUMENT_FRAG_NODE = 11 XML_NOTATION_NODE = 12 XML_HTML_DOCUMENT_NODE = 13 XML_DTD_NODE = 14 XML_ELEMENT_DECL = 15 XML_ATTRIBUTE_DECL = 16 XML_ENTITY_DECL = 17 XML_NAMESPACE_DECL = 18 XML_XINCLUDE_START = 19 XML_XINCLUDE_END = 20 XML_DOCB_DOCUMENT_NODE = 21 */ } } /*! 
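buffer_print() and insert_prefix(), used throughout the dump helpers, are defined earlier in this file and are not part of this hunk. Conceptually, buffer_print() appends formatted text to a heap buffer, growing it on demand while tracking the write offset; the hypothetical helper below is only a sketch of that idea (assuming <stdarg.h>, <stdio.h> and <stdlib.h>), not the actual macro.

static void
example_buffer_print(char **buffer, int *max, int *offset, const char *fmt, ...)
{
    va_list ap;
    int len;

    va_start(ap, fmt);
    len = vsnprintf(NULL, 0, fmt, ap);      /* space needed for this chunk */
    va_end(ap);

    if ((*offset + len + 1) > *max) {       /* grow the buffer if required */
        *max = *offset + len + 1;
        *buffer = realloc(*buffer, *max);   /* error handling omitted */
    }

    va_start(ap, fmt);
    vsnprintf(*buffer + *offset, *max - *offset, fmt, ap);
    va_end(ap);
    *offset += len;                         /* *offset stays at the NUL */
}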
* \internal * \brief Add a single character to a dynamically allocated buffer * * \param[in,out] buffer Buffer to store text in (may be reallocated) * \param[in,out] offset Current position of null terminator within \p buffer * \param[in,out] max Current size of \p buffer in bytes * \param[in] c Character to add to \p buffer */ void pcmk__buffer_add_char(char **buffer, int *offset, int *max, char c) { buffer_print(*buffer, *max, *offset, "%c", c); } char * dump_xml_formatted_with_text(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; pcmk__xml2text(an_xml_node, xml_log_option_formatted|xml_log_option_full_fledged, &buffer, &offset, &max, 0); return buffer; } char * dump_xml_formatted(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; pcmk__xml2text(an_xml_node, xml_log_option_formatted, &buffer, &offset, &max, 0); return buffer; } char * dump_xml_unformatted(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; pcmk__xml2text(an_xml_node, 0, &buffer, &offset, &max, 0); return buffer; } gboolean xml_has_children(const xmlNode * xml_root) { if (xml_root != NULL && xml_root->children != NULL) { return TRUE; } return FALSE; } void xml_remove_prop(xmlNode * obj, const char *name) { if (pcmk__check_acl(obj, NULL, xpf_acl_write) == FALSE) { crm_trace("Cannot remove %s from %s", name, obj->name); } else if (pcmk__tracking_xml_changes(obj, FALSE)) { /* Leave in place (marked for removal) until after the diff is calculated */ xml_private_t *p = NULL; xmlAttr *attr = xmlHasProp(obj, (pcmkXmlStr) name); p = attr->_private; set_parent_flag(obj, xpf_dirty); pcmk__set_xml_flags(p, xpf_deleted); } else { xmlUnsetProp(obj, (pcmkXmlStr) name); } } void save_xml_to_file(xmlNode * xml, const char *desc, const char *filename) { char *f = NULL; if (filename == NULL) { char *uuid = crm_generate_uuid(); f = crm_strdup_printf("%s/%s", pcmk__get_tmpdir(), uuid); filename = f; free(uuid); } crm_info("Saving %s to %s", desc, filename); write_xml_file(xml, filename, FALSE); free(f); } /*! * \internal * \brief Set a flag on all attributes of an XML element * * \param[in,out] xml XML node to set flags on * \param[in] flag XML private flag to set */ static void set_attrs_flag(xmlNode *xml, enum xml_private_flags flag) { for (xmlAttr *attr = pcmk__xe_first_attr(xml); attr; attr = attr->next) { pcmk__set_xml_flags((xml_private_t *) (attr->_private), flag); } } /*! * \internal * \brief Add an XML attribute to a node, marked as deleted * * When calculating XML changes, we need to know when an attribute has been * deleted. Add the attribute back to the new XML, so that we can check the * removal against ACLs, and mark it as deleted for later removal after * differences have been calculated. 
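A minimal usage sketch for the convenience wrappers above; the element and attribute names are illustrative, and the string returned by dump_xml_formatted() is heap-allocated and freed by the caller.

xmlNode *node = create_xml_node(NULL, "node_state");
char *text = NULL;

crm_xml_add(node, "id", "node1");          /* illustrative attribute */
text = dump_xml_formatted(node);
crm_trace("Serialized XML:\n%s", text);

free(text);
free_xml(node);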
*/ static void mark_attr_deleted(xmlNode *new_xml, const char *element, const char *attr_name, const char *old_value) { xml_private_t *p = new_xml->doc->_private; xmlAttr *attr = NULL; // Prevent the dirty flag being set recursively upwards pcmk__clear_xml_flags(p, xpf_tracking); // Restore the old value (and the tracking flag) attr = xmlSetProp(new_xml, (pcmkXmlStr) attr_name, (pcmkXmlStr) old_value); pcmk__set_xml_flags(p, xpf_tracking); // Reset flags (so the attribute doesn't appear as newly created) p = attr->_private; p->flags = 0; // Check ACLs and mark restored value for later removal xml_remove_prop(new_xml, attr_name); crm_trace("XML attribute %s=%s was removed from %s", attr_name, old_value, element); } /* * \internal * \brief Check ACLs for a changed XML attribute */ static void mark_attr_changed(xmlNode *new_xml, const char *element, const char *attr_name, const char *old_value) { char *vcopy = crm_element_value_copy(new_xml, attr_name); crm_trace("XML attribute %s was changed from '%s' to '%s' in %s", attr_name, old_value, vcopy, element); // Restore the original value xmlSetProp(new_xml, (pcmkXmlStr) attr_name, (pcmkXmlStr) old_value); // Change it back to the new value, to check ACLs crm_xml_add(new_xml, attr_name, vcopy); free(vcopy); } /*! * \internal * \brief Mark an XML attribute as having changed position */ static void mark_attr_moved(xmlNode *new_xml, const char *element, xmlAttr *old_attr, xmlAttr *new_attr, int p_old, int p_new) { xml_private_t *p = new_attr->_private; crm_trace("XML attribute %s moved from position %d to %d in %s", old_attr->name, p_old, p_new, element); // Mark document, element, and all element's parents as changed mark_xml_node_dirty(new_xml); // Mark attribute as changed pcmk__set_xml_flags(p, xpf_dirty|xpf_moved); p = (p_old > p_new)? old_attr->_private : new_attr->_private; pcmk__set_xml_flags(p, xpf_skip); } /*! * \internal * \brief Calculate differences in all previously existing XML attributes */ static void xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml) { xmlAttr *attr_iter = pcmk__xe_first_attr(old_xml); while (attr_iter != NULL) { xmlAttr *old_attr = attr_iter; xmlAttr *new_attr = xmlHasProp(new_xml, attr_iter->name); const char *name = (const char *) attr_iter->name; const char *old_value = crm_element_value(old_xml, name); attr_iter = attr_iter->next; if (new_attr == NULL) { mark_attr_deleted(new_xml, (const char *) old_xml->name, name, old_value); } else { xml_private_t *p = new_attr->_private; int new_pos = pcmk__xml_position((xmlNode*) new_attr, xpf_skip); int old_pos = pcmk__xml_position((xmlNode*) old_attr, xpf_skip); const char *new_value = crm_element_value(new_xml, name); // This attribute isn't new pcmk__clear_xml_flags(p, xpf_created); if (strcmp(new_value, old_value) != 0) { mark_attr_changed(new_xml, (const char *) old_xml->name, name, old_value); } else if ((old_pos != new_pos) && !pcmk__tracking_xml_changes(new_xml, TRUE)) { mark_attr_moved(new_xml, (const char *) old_xml->name, old_attr, new_attr, old_pos, new_pos); } } } } /*! 
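pcmk__xml_position() is defined elsewhere in this file. For reading the diff logic above, it can be understood as "the index of a node among its siblings, ignoring siblings that carry the given flag", roughly as in this conceptual sketch (an illustration, not the actual implementation):

static int
example_position(const xmlNode *xml, enum xml_private_flags ignore_if_set)
{
    int position = 0;

    for (const xmlNode *sib = xml->prev; sib != NULL; sib = sib->prev) {
        xml_private_t *p = sib->_private;

        if ((p == NULL) || !pcmk_is_set(p->flags, ignore_if_set)) {
            position++;     /* count only siblings without the flag set */
        }
    }
    return position;
}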
* \internal * \brief Check all attributes in new XML for creation */ static void mark_created_attrs(xmlNode *new_xml) { xmlAttr *attr_iter = pcmk__xe_first_attr(new_xml); while (attr_iter != NULL) { xmlAttr *new_attr = attr_iter; xml_private_t *p = attr_iter->_private; attr_iter = attr_iter->next; if (pcmk_is_set(p->flags, xpf_created)) { const char *attr_name = (const char *) new_attr->name; crm_trace("Created new attribute %s=%s in %s", attr_name, crm_element_value(new_xml, attr_name), new_xml->name); /* Check ACLs (we can't use the remove-then-create trick because it * would modify the attribute position). */ if (pcmk__check_acl(new_xml, attr_name, xpf_acl_write)) { pcmk__mark_xml_attr_dirty(new_attr); } else { // Creation was not allowed, so remove the attribute xmlUnsetProp(new_xml, new_attr->name); } } } } /*! * \internal * \brief Calculate differences in attributes between two XML nodes */ static void xml_diff_attrs(xmlNode *old_xml, xmlNode *new_xml) { set_attrs_flag(new_xml, xpf_created); // cleared later if not really new xml_diff_old_attrs(old_xml, new_xml); mark_created_attrs(new_xml); } /*! * \internal * \brief Add an XML child element to a node, marked as deleted * * When calculating XML changes, we need to know when a child element has been * deleted. Add the child back to the new XML, so that we can check the removal * against ACLs, and mark it as deleted for later removal after differences have * been calculated. */ static void mark_child_deleted(xmlNode *old_child, xmlNode *new_parent) { // Re-create the child element so we can check ACLs xmlNode *candidate = add_node_copy(new_parent, old_child); // Clear flags on new child and its children reset_xml_node_flags(candidate); // Check whether ACLs allow the deletion pcmk__apply_acl(xmlDocGetRootElement(candidate->doc)); // Remove the child again (which will track it in document's deleted_objs) free_xml_with_position(candidate, pcmk__xml_position(old_child, xpf_skip)); if (pcmk__xml_match(new_parent, old_child, true) == NULL) { pcmk__set_xml_flags((xml_private_t *) (old_child->_private), xpf_skip); } } static void mark_child_moved(xmlNode *old_child, xmlNode *new_parent, xmlNode *new_child, int p_old, int p_new) { xml_private_t *p = new_child->_private; crm_trace("Child element %s with id='%s' moved from position %d to %d under %s", new_child->name, (ID(new_child)? 
ID(new_child) : ""), p_old, p_new, new_parent->name); mark_xml_node_dirty(new_parent); pcmk__set_xml_flags(p, xpf_moved); if (p_old > p_new) { p = old_child->_private; } else { p = new_child->_private; } pcmk__set_xml_flags(p, xpf_skip); } // Given original and new XML, mark new XML portions that have changed static void mark_xml_changes(xmlNode *old_xml, xmlNode *new_xml, bool check_top) { xmlNode *cIter = NULL; xml_private_t *p = NULL; CRM_CHECK(new_xml != NULL, return); if (old_xml == NULL) { pcmk__mark_xml_created(new_xml); pcmk__apply_creation_acl(new_xml, check_top); return; } p = new_xml->_private; CRM_CHECK(p != NULL, return); if(p->flags & xpf_processed) { /* Avoid re-comparing nodes */ return; } pcmk__set_xml_flags(p, xpf_processed); xml_diff_attrs(old_xml, new_xml); // Check for differences in the original children for (cIter = pcmk__xml_first_child(old_xml); cIter != NULL; ) { xmlNode *old_child = cIter; xmlNode *new_child = pcmk__xml_match(new_xml, cIter, true); cIter = pcmk__xml_next(cIter); if(new_child) { mark_xml_changes(old_child, new_child, TRUE); } else { mark_child_deleted(old_child, new_xml); } } // Check for moved or created children for (cIter = pcmk__xml_first_child(new_xml); cIter != NULL; ) { xmlNode *new_child = cIter; xmlNode *old_child = pcmk__xml_match(old_xml, cIter, true); cIter = pcmk__xml_next(cIter); if(old_child == NULL) { // This is a newly created child p = new_child->_private; pcmk__set_xml_flags(p, xpf_skip); mark_xml_changes(old_child, new_child, TRUE); } else { /* Check for movement, we already checked for differences */ int p_new = pcmk__xml_position(new_child, xpf_skip); int p_old = pcmk__xml_position(old_child, xpf_skip); if(p_old != p_new) { mark_child_moved(old_child, new_xml, new_child, p_old, p_new); } } } } void xml_calculate_significant_changes(xmlNode *old_xml, xmlNode *new_xml) { pcmk__set_xml_doc_flag(new_xml, xpf_lazy); xml_calculate_changes(old_xml, new_xml); } void xml_calculate_changes(xmlNode *old_xml, xmlNode *new_xml) { CRM_CHECK(pcmk__str_eq(crm_element_name(old_xml), crm_element_name(new_xml), pcmk__str_casei), return); CRM_CHECK(pcmk__str_eq(ID(old_xml), ID(new_xml), pcmk__str_casei), return); if(xml_tracking_changes(new_xml) == FALSE) { xml_track_changes(new_xml, NULL, NULL, FALSE); } mark_xml_changes(old_xml, new_xml, FALSE); } gboolean can_prune_leaf(xmlNode * xml_node) { xmlNode *cIter = NULL; gboolean can_prune = TRUE; const char *name = crm_element_name(xml_node); if (pcmk__strcase_any_of(name, XML_TAG_RESOURCE_REF, XML_CIB_TAG_OBJ_REF, XML_ACL_TAG_ROLE_REF, XML_ACL_TAG_ROLE_REFv1, NULL)) { return FALSE; } for (xmlAttrPtr a = pcmk__xe_first_attr(xml_node); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; if (strcmp(p_name, XML_ATTR_ID) == 0) { continue; } can_prune = FALSE; } cIter = pcmk__xml_first_child(xml_node); while (cIter) { xmlNode *child = cIter; cIter = pcmk__xml_next(cIter); if (can_prune_leaf(child)) { free_xml(child); } else { can_prune = FALSE; } } return can_prune; } /*! 
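A hedged end-to-end sketch of how the change-tracking entry points above are typically driven; the element and attribute names are illustrative, and copy_xml(), crm_xml_add(), xml_accept_changes() and free_xml() are used as they are elsewhere in this file.

xmlNode *old_cib = create_xml_node(NULL, "cib");
xmlNode *new_cib = copy_xml(old_cib);        /* working copy to modify */

crm_xml_add(new_cib, "admin_epoch", "1");    /* a change to be detected */

/* Enables tracking on new_cib if not already enabled, then flags created,
 * changed, moved and deleted content relative to old_cib */
xml_calculate_changes(old_cib, new_cib);

/* ... log or apply the recorded changes here ... */

xml_accept_changes(new_cib);                 /* clear the tracking state */
free_xml(old_cib);
free_xml(new_cib);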
* \internal * \brief Find a comment with matching content in specified XML * * \param[in] root XML to search * \param[in] search_comment Comment whose content should be searched for * \param[in] exact If true, comment must also be at same position */ xmlNode * pcmk__xc_match(xmlNode *root, xmlNode *search_comment, bool exact) { xmlNode *a_child = NULL; int search_offset = pcmk__xml_position(search_comment, xpf_skip); CRM_CHECK(search_comment->type == XML_COMMENT_NODE, return NULL); for (a_child = pcmk__xml_first_child(root); a_child != NULL; a_child = pcmk__xml_next(a_child)) { if (exact) { int offset = pcmk__xml_position(a_child, xpf_skip); xml_private_t *p = a_child->_private; if (offset < search_offset) { continue; } else if (offset > search_offset) { return NULL; } if (pcmk_is_set(p->flags, xpf_skip)) { continue; } } if (a_child->type == XML_COMMENT_NODE && pcmk__str_eq((const char *)a_child->content, (const char *)search_comment->content, pcmk__str_casei)) { return a_child; } else if (exact) { return NULL; } } return NULL; } /*! * \internal * \brief Make one XML comment match another (in content) * * \param[in,out] parent If \p target is NULL and this is not, add or update * comment child of this XML node that matches \p update * \param[in,out] target If not NULL, update this XML comment node * \param[in] update Make comment content match this (must not be NULL) * * \note At least one of \parent and \target must be non-NULL */ void pcmk__xc_update(xmlNode *parent, xmlNode *target, xmlNode *update) { CRM_CHECK(update != NULL, return); CRM_CHECK(update->type == XML_COMMENT_NODE, return); if (target == NULL) { target = pcmk__xc_match(parent, update, false); } if (target == NULL) { add_node_copy(parent, update); } else if (!pcmk__str_eq((const char *)target->content, (const char *)update->content, pcmk__str_casei)) { xmlFree(target->content); target->content = xmlStrdup(update->content); } } /*! * \internal * \brief Make one XML tree match another (in children and attributes) * * \param[in,out] parent If \p target is NULL and this is not, add or update * child of this XML node that matches \p update * \param[in,out] target If not NULL, update this XML * \param[in] update Make the desired XML match this (must not be NULL) * \param[in] as_diff If true, expand "++" when making attributes match * * \note At least one of \parent and \target must be non-NULL */ void pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update, bool as_diff) { xmlNode *a_child = NULL; const char *object_name = NULL, *object_href = NULL, *object_href_val = NULL; #if XML_PARSER_DEBUG crm_log_xml_trace("update:", update); crm_log_xml_trace("target:", target); #endif CRM_CHECK(update != NULL, return); if (update->type == XML_COMMENT_NODE) { pcmk__xc_update(parent, target, update); return; } object_name = crm_element_name(update); object_href_val = ID(update); if (object_href_val != NULL) { object_href = XML_ATTR_ID; } else { object_href_val = crm_element_value(update, XML_ATTR_IDREF); object_href = (object_href_val == NULL) ? NULL : XML_ATTR_IDREF; } CRM_CHECK(object_name != NULL, return); CRM_CHECK(target != NULL || parent != NULL, return); if (target == NULL) { target = pcmk__xe_match(parent, object_name, object_href, object_href_val); } if (target == NULL) { target = create_xml_node(parent, object_name); CRM_CHECK(target != NULL, return); #if XML_PARSER_DEBUG crm_trace("Added <%s%s%s%s%s/>", crm_str(object_name), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? 
object_href_val : ""); } else { crm_trace("Found node <%s%s%s%s%s/> to update", crm_str(object_name), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? object_href_val : ""); #endif } CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(update), pcmk__str_casei), return); if (as_diff == FALSE) { /* So that expand_plus_plus() gets called */ copy_in_properties(target, update); } else { /* No need for expand_plus_plus(), just raw speed */ for (xmlAttrPtr a = pcmk__xe_first_attr(update); a != NULL; a = a->next) { const char *p_value = pcmk__xml_attr_value(a); /* Remove it first so the ordering of the update is preserved */ xmlUnsetProp(target, a->name); xmlSetProp(target, a->name, (pcmkXmlStr) p_value); } } for (a_child = pcmk__xml_first_child(update); a_child != NULL; a_child = pcmk__xml_next(a_child)) { #if XML_PARSER_DEBUG crm_trace("Updating child <%s%s%s%s%s/>", crm_str(object_name), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? object_href_val : ""); #endif pcmk__xml_update(target, NULL, a_child, as_diff); } #if XML_PARSER_DEBUG crm_trace("Finished with <%s%s%s%s%s/>", crm_str(object_name), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? object_href_val : ""); #endif } gboolean update_xml_child(xmlNode * child, xmlNode * to_update) { gboolean can_update = TRUE; xmlNode *child_of_child = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(to_update != NULL, return FALSE); if (!pcmk__str_eq(crm_element_name(to_update), crm_element_name(child), pcmk__str_casei)) { can_update = FALSE; } else if (!pcmk__str_eq(ID(to_update), ID(child), pcmk__str_casei)) { can_update = FALSE; } else if (can_update) { #if XML_PARSER_DEBUG crm_log_xml_trace(child, "Update match found..."); #endif pcmk__xml_update(NULL, child, to_update, false); } for (child_of_child = pcmk__xml_first_child(child); child_of_child != NULL; child_of_child = pcmk__xml_next(child_of_child)) { /* only update the first one */ if (can_update) { break; } can_update = update_xml_child(child_of_child, to_update); } return can_update; } int find_xml_children(xmlNode ** children, xmlNode * root, const char *tag, const char *field, const char *value, gboolean search_matches) { int match_found = 0; CRM_CHECK(root != NULL, return FALSE); CRM_CHECK(children != NULL, return FALSE); if (tag != NULL && !pcmk__str_eq(tag, crm_element_name(root), pcmk__str_casei)) { } else if (value != NULL && !pcmk__str_eq(value, crm_element_value(root, field), pcmk__str_casei)) { } else { if (*children == NULL) { *children = create_xml_node(NULL, __func__); } add_node_copy(*children, root); match_found = 1; } if (search_matches || match_found == 0) { xmlNode *child = NULL; for (child = pcmk__xml_first_child(root); child != NULL; child = pcmk__xml_next(child)) { match_found += find_xml_children(children, child, tag, field, value, search_matches); } } return match_found; } gboolean replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean delete_only) { gboolean can_delete = FALSE; xmlNode *child_of_child = NULL; const char *up_id = NULL; const char *child_id = NULL; const char *right_val = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(update != NULL, return FALSE); up_id = ID(update); child_id = ID(child); if (up_id == NULL || (child_id && strcmp(child_id, up_id) == 0)) { can_delete = TRUE; } if (!pcmk__str_eq(crm_element_name(update), crm_element_name(child), pcmk__str_casei)) { 
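For reference, a small illustrative use of find_xml_children() defined above; cib_root is a hypothetical tree, the tag and attribute names are examples, and passing TRUE for search_matches keeps descending into subtrees that already matched.

xmlNode *matches = NULL;
int found = find_xml_children(&matches, cib_root, "primitive",
                              "class", "ocf", TRUE);

crm_trace("Found %d matching elements", found);
if (matches != NULL) {
    free_xml(matches);    /* container of copies belongs to the caller */
}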
can_delete = FALSE; } if (can_delete && delete_only) { for (xmlAttrPtr a = pcmk__xe_first_attr(update); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); right_val = crm_element_value(child, p_name); if (!pcmk__str_eq(p_value, right_val, pcmk__str_casei)) { can_delete = FALSE; } } } if (can_delete && parent != NULL) { crm_log_xml_trace(child, "Delete match found..."); if (delete_only || update == NULL) { free_xml(child); } else { xmlNode *tmp = copy_xml(update); xmlDoc *doc = tmp->doc; xmlNode *old = NULL; xml_accept_changes(tmp); old = xmlReplaceNode(child, tmp); if(xml_tracking_changes(tmp)) { /* Replaced sections may have included relevant ACLs */ pcmk__apply_acl(tmp); } xml_calculate_changes(old, tmp); xmlDocSetRootElement(doc, old); free_xml(old); } child = NULL; return TRUE; } else if (can_delete) { crm_log_xml_debug(child, "Cannot delete the search root"); can_delete = FALSE; } child_of_child = pcmk__xml_first_child(child); while (child_of_child) { xmlNode *next = pcmk__xml_next(child_of_child); can_delete = replace_xml_child(child, child_of_child, update, delete_only); /* only delete the first one */ if (can_delete) { child_of_child = NULL; } else { child_of_child = next; } } return can_delete; } xmlNode * sorted_xml(xmlNode *input, xmlNode *parent, gboolean recursive) { xmlNode *child = NULL; GSList *nvpairs = NULL; xmlNode *result = NULL; const char *name = NULL; CRM_CHECK(input != NULL, return NULL); name = crm_element_name(input); CRM_CHECK(name != NULL, return NULL); result = create_xml_node(parent, name); nvpairs = pcmk_xml_attrs2nvpairs(input); nvpairs = pcmk_sort_nvpairs(nvpairs); pcmk_nvpairs2xml_attrs(nvpairs, result); pcmk_free_nvpairs(nvpairs); for (child = pcmk__xml_first_child(input); child != NULL; child = pcmk__xml_next(child)) { if (recursive) { sorted_xml(child, result, recursive); } else { add_node_copy(result, child); } } return result; } xmlNode * first_named_child(const xmlNode *parent, const char *name) { xmlNode *match = NULL; for (match = pcmk__xe_first_child(parent); match != NULL; match = pcmk__xe_next(match)) { /* * name == NULL gives first child regardless of name; this is * semantically incorrect in this function, but may be necessary * due to prior use of xml_child_iter_filter */ if (pcmk__str_eq(name, (const char *)match->name, pcmk__str_null_matches)) { return match; } } return NULL; } /*! * \brief Get next instance of same XML tag * * \param[in] sibling XML tag to start from * * \return Next sibling XML tag with same name */ xmlNode * crm_next_same_xml(const xmlNode *sibling) { xmlNode *match = pcmk__xe_next(sibling); const char *name = crm_element_name(sibling); while (match != NULL) { if (!strcmp(crm_element_name(match), name)) { return match; } match = pcmk__xe_next(match); } return NULL; } void crm_xml_init(void) { static bool init = TRUE; if(init) { init = FALSE; /* The default allocator XML_BUFFER_ALLOC_EXACT does far too many * pcmk__realloc()s and it can take upwards of 18 seconds (yes, seconds) * to dump a 28kb tree which XML_BUFFER_ALLOC_DOUBLEIT can do in * less than 1 second. 
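first_named_child() and crm_next_same_xml() above are typically combined into the following iteration idiom; the parent variable and the element and attribute names here are illustrative.

for (xmlNode *nv = first_named_child(parent, "nvpair");
     nv != NULL; nv = crm_next_same_xml(nv)) {

    const char *name = crm_element_value(nv, "name");
    const char *value = crm_element_value(nv, "value");

    crm_trace("Found nvpair %s=%s", name, value);
}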
*/ xmlSetBufferAllocationScheme(XML_BUFFER_ALLOC_DOUBLEIT); /* Populate and free the _private field when nodes are created and destroyed */ xmlDeregisterNodeDefault(free_private_data); xmlRegisterNodeDefault(new_private_data); crm_schema_init(); } } void crm_xml_cleanup(void) { crm_info("Cleaning up memory from libxml2"); crm_schema_cleanup(); xmlCleanupParser(); } #define XPATH_MAX 512 xmlNode * expand_idref(xmlNode * input, xmlNode * top) { const char *tag = NULL; const char *ref = NULL; xmlNode *result = input; if (result == NULL) { return NULL; } else if (top == NULL) { top = input; } tag = crm_element_name(result); ref = crm_element_value(result, XML_ATTR_IDREF); if (ref != NULL) { char *xpath_string = crm_strdup_printf("//%s[@id='%s']", tag, ref); result = get_xpath_object(xpath_string, top, LOG_ERR); if (result == NULL) { char *nodePath = (char *)xmlGetNodePath(top); crm_err("No match for %s found in %s: Invalid configuration", xpath_string, crm_str(nodePath)); free(nodePath); } free(xpath_string); } return result; } void crm_destroy_xml(gpointer data) { free_xml(data); } char * pcmk__xml_artefact_root(enum pcmk__xml_artefact_ns ns) { static const char *base = NULL; char *ret = NULL; if (base == NULL) { base = getenv("PCMK_schema_directory"); } if (pcmk__str_empty(base)) { base = CRM_SCHEMA_DIRECTORY; } switch (ns) { case pcmk__xml_artefact_ns_legacy_rng: case pcmk__xml_artefact_ns_legacy_xslt: ret = strdup(base); break; case pcmk__xml_artefact_ns_base_rng: case pcmk__xml_artefact_ns_base_xslt: ret = crm_strdup_printf("%s/base", base); break; default: crm_err("XML artefact family specified as %u not recognized", ns); } return ret; } char * pcmk__xml_artefact_path(enum pcmk__xml_artefact_ns ns, const char *filespec) { char *base = pcmk__xml_artefact_root(ns), *ret = NULL; switch (ns) { case pcmk__xml_artefact_ns_legacy_rng: case pcmk__xml_artefact_ns_base_rng: ret = crm_strdup_printf("%s/%s.rng", base, filespec); break; case pcmk__xml_artefact_ns_legacy_xslt: case pcmk__xml_artefact_ns_base_xslt: ret = crm_strdup_printf("%s/%s.xsl", base, filespec); break; default: crm_err("XML artefact family specified as %u not recognized", ns); } free(base); return ret; } void pcmk__xe_set_propv(xmlNodePtr node, va_list pairs) { while (true) { const char *name, *value; name = va_arg(pairs, const char *); if (name == NULL) { return; } value = va_arg(pairs, const char *); if (value == NULL) { return; } crm_xml_add(node, name, value); } } void pcmk__xe_set_props(xmlNodePtr node, ...) { va_list pairs; va_start(pairs, node); pcmk__xe_set_propv(node, pairs); va_end(pairs); } // Deprecated functions kept only for backward API compatibility xmlNode *find_entity(xmlNode *parent, const char *node_name, const char *id); xmlNode * find_entity(xmlNode *parent, const char *node_name, const char *id) { return pcmk__xe_match(parent, node_name, ((id == NULL)? id : XML_ATTR_ID), id); } diff --git a/m4/REQUIRE_HEADER.m4 b/m4/REQUIRE_HEADER.m4 new file mode 100644 index 0000000000..d723458200 --- /dev/null +++ b/m4/REQUIRE_HEADER.m4 @@ -0,0 +1,16 @@ +# REQUIRE_HEADER([HEADER]) +# +# Error if a C header file can't be found +# +dnl +dnl Copyright 2020 the Pacemaker project contributors +dnl +dnl The version control history for this file may have further details. +dnl +dnl This source code is licensed under the GNU General Public License version 2 +dnl or later (GPLv2+) WITHOUT ANY WARRANTY. 
+ +dnl Usage: REQUIRE_HEADER(header-file, [prerequisite-includes]) +AC_DEFUN([REQUIRE_HEADER], [ + AC_CHECK_HEADERS([$1], [], [AC_MSG_ERROR(Could not find required C header $1)], [$2]) +]) diff --git a/version.m4 b/m4/version.m4 similarity index 100% rename from version.m4 rename to m4/version.m4 diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in index 0be98ab65f..1c3489b7f8 100644 --- a/rpm/pacemaker.spec.in +++ b/rpm/pacemaker.spec.in @@ -1,888 +1,853 @@ # User-configurable globals and defines to control package behavior # (these should not test {with X} values, which are declared later) ## User and group to use for nonprivileged services %global uname hacluster %global gname haclient ## Where to install Pacemaker documentation %if 0%{?suse_version} > 0 %global pcmk_docdir %{_docdir}/%{name}-%{version} %else %if 0%{?rhel} > 7 %global pcmk_docdir %{_docdir}/%{name}-doc %else %global pcmk_docdir %{_docdir}/%{name} %endif %endif ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) %global pcmkversion X.Y.Z %global specversion 1 ## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build %global commit HEAD ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 -## Python major version to use (2, 3, or 0 for auto-detect) -%global python_major 0 - # Define conditionals so that "rpmbuild --with " and # "rpmbuild --without " can enable and disable specific features ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd ## Add option to enable support for storing sensitive information outside CIB %bcond_with cibsecrets ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Add option to create binaries with coverage analysis %bcond_with coverage ## Add option to skip generating documentation ## (the build tools aren't available everywhere) %bcond_without doc ## Add option to prefix package version with "0." 
## (so later "official" packages will be considered updates) %bcond_with pre_release ## Add option to ship Upstart job files %bcond_with upstart_job ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening ## Add option to disable links for legacy daemon names %bcond_without legacy_links # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Whether this is a tagged release (final or release candidate) %define tag_release %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo 1 ;; *%{rparen} echo 0 ;; esac) ## Portion of export/dist tarball name after "pacemaker-", and release version %if 0%{tag_release} %define archive_version %(c=%{commit}; echo ${c:10}) %define archive_github_url %{commit}#/%{name}-%{archive_version}.tar.gz %define pcmk_release %(c=%{commit}; case $c in *-rc[[:digit:]]*%{rparen} echo 0.%{specversion}.${c: -3} ;; *%{rparen} echo %{specversion} ;; esac) %else %define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}}) %define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz %if %{with pre_release} %define pcmk_release 0.%{specversion}.%{archive_version}.git %else %define pcmk_release %{specversion}.%{archive_version}.git %endif %endif ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and ## for that there are only few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## - systemd-aware rpm as manifested with the presence of particular ## macro (rpm itself will trivially always be present when building) ## - existence of /usr/lib/os-release file, which is something heavily ## propagated by systemd project ## - when not good enough, there's always a possibility to check ## particular distro-specific macros (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $? 
-ne 0; echo $?)) %if 0%{?fedora} > 20 || 0%{?rhel} > 7 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif %if !%{defined _rundir} %if 0%{?fedora} >= 15 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1200 %define _rundir /run %else %define _rundir /var/run %endif %endif %if 0%{?fedora} > 22 || 0%{?rhel} > 7 %global supports_recommends 1 %endif ## Different distros name certain packages differently ## (note: corosync libraries also differ, but all provide corosync-devel) %if 0%{?suse_version} > 0 %global pkgname_bzip2_devel libbz2-devel %global pkgname_docbook_xsl docbook-xsl-stylesheets %global pkgname_gnutls_devel libgnutls-devel %global pkgname_shadow_utils shadow %global pkgname_procps procps %global pkgname_glue_libs libglue %global pkgname_pcmk_libs lib%{name}3 %global hacluster_id 90 %else %global pkgname_libtool_devel libtool-ltdl-devel %global pkgname_libtool_devel_arch libtool-ltdl-devel%{?_isa} %global pkgname_bzip2_devel bzip2-devel %global pkgname_docbook_xsl docbook-style-xsl %global pkgname_gnutls_devel gnutls-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs %global hacluster_id 189 %endif # Python-related definitions -## Use Python 3 on certain platforms if major version not specified -%if %{?python_major} == 0 -%if 0%{?fedora} > 26 || 0%{?rhel} > 7 -%global python_major 3 -%endif -%endif - ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) ## -- distro-dependent tricks or automake's fallback to be applied there %if %{defined _python_bytecompile_extra} %global _python_bytecompile_extra 0 %else ### the statement effectively means no RPM-native byte-compiling will occur at ### all, so distro-dependent tricks for Python-specific packages to be applied %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif -## Values that differ by Python major version -%if 0%{?python_major} > 2 +## Prefer Python 3 definitions explicitly, in case 2 is also available +%if %{defined __python3} %global python_name python3 -%global python_path %{?__python3}%{!?__python3:/usr/bin/python%{?python3_pkgversion}%{!?python3_pkgversion:3}} +%global python_path %{__python3} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else -%if 0%{?python_major} > 1 -%global python_name python2 -%global python_path %{?__python2}%{!?__python2:/usr/bin/python%{?python2_pkgversion}%{!?python2_pkgversion:2}} -%define python_site %{?python2_sitelib}%{!?python2_sitelib:%( - %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} -%else %if %{defined python_version} %global python_name python%(echo %{python_version} | cut -d'.' 
-f1) %define python_path %{?__python}%{!?__python:/usr/bin/%{python_name}} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} %endif %define python_site %{?python_sitelib}%{!?python_sitelib:%( %{python_name} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif -%endif -# Definitions for backward compatibility with older RPM versions - -## Ensure the license macro behaves consistently (older RPM will otherwise -## overwrite it once it encounters "License:"). Courtesy Jason Tibbitts: -## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 -%if !%{defined _licensedir} -%define description %{lua: - rpm.define("license %doc") - print("%description") -} -%endif - # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: https://www.clusterlabs.org/ Group: System Environment/Daemons # Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e # will download pacemaker-e91769e.tar.gz # # The ending part starting with '#' is ignored by github but necessary for # rpmbuild to know what the tar archive name is. (The downloaded file will be # named correctly only for commit IDs, not tagged releases.) # # You can use "spectool -s 0 pacemaker.spec" (rpmdevtools) to show final URL. Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{archive_github_url} Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if !%{defined _unitdir} Requires: %{pkgname_procps} Requires: psmisc %endif %{?systemd_requires} Requires: %{python_path} BuildRequires: %{python_name}-devel # Pacemaker requires a minimum libqb functionality Requires: libqb >= 0.13.0 BuildRequires: libqb-devel >= 0.13.0 # Basics required for the build (even if usually satisfied through other BRs) BuildRequires: coreutils findutils grep sed # Required for core functionality BuildRequires: automake autoconf gcc libtool pkgconfig %{?pkgname_libtool_devel} -BuildRequires: pkgconfig(glib-2.0) >= 2.16 +BuildRequires: pkgconfig(glib-2.0) >= 2.32 BuildRequires: libxml2-devel libxslt-devel libuuid-devel BuildRequires: %{pkgname_bzip2_devel} # Enables optional functionality BuildRequires: ncurses-devel %{pkgname_docbook_xsl} BuildRequires: help2man %{pkgname_gnutls_devel} pam-devel pkgconfig(dbus-1) %if %{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync >= 2.0.0 BuildRequires: corosync-devel >= 2.0.0 %if %{with stonithd} BuildRequires: %{pkgname_glue_libs}-devel %endif %if %{with doc} -BuildRequires: inkscape asciidoc python3-sphinx +BuildRequires: inkscape asciidoc %{python_name}-sphinx %endif Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} # Bundled bits ## Pacemaker uses the crypto/md5-buffer module from gnulib %if 0%{?fedora} || 0%{?rhel} Provides: bundled(gnulib) %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --with(out) : cibsecrets coverage doc stonithd hardening pre_release profiling upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %if 0%{?supports_recommends} Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report Recommends: tar Recommends: bzip2 %endif Requires: perl-TimeDate Requires: %{pkgname_procps} Requires: psmisc Requires(post):coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{pkgname_pcmk_libs} License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons Requires(pre): %{pkgname_shadow_utils} Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t Conflicts: sbd < 1.4.0 %description -n %{pkgname_pcmk_libs} Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if !%{defined _unitdir} Requires: %{pkgname_procps} %endif # -remote can be fully independent of systemd %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. 
%package -n %{pkgname_pcmk_libs}-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: libuuid-devel%{?_isa} %{?pkgname_libtool_devel_arch} Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} Requires: %{pkgname_bzip2_devel}%{?_isa} glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} Requires: corosync-devel >= 2.0.0 %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: %{pkgname_procps} Requires: psmisc BuildArch: noarch -# systemd python bindings are separate package in some distros +# systemd Python bindings are a separate package in some distros %if %{defined systemd_requires} - %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd -%else -%if 0%{?fedora} > 20 || 0%{?rhel} > 6 -Requires: systemd-python -%endif %endif - %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker Group: Documentation BuildArch: noarch %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas License: GPLv2+ Summary: Schemas and upgrade stylesheets for Pacemaker BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker Pacemaker is an advanced, scalable High-Availability cluster resource manager. %prep %setup -q -n %{name}-%{archive_version} %build export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will # use its own defaults otherwise; if such hardenings are completely # undesired, rpmbuild using "--without hardening" # (or "--define '_without_hardening 1'") export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %endif ./autogen.sh %{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ %{!?with_legacy_links: --disable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{?with_cibsecrets: --with-cibsecrets} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ --with-initdir=%{_initrddir} \ --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif make %{_smp_mflags} V=1 %check make %{_smp_mflags} check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED } 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint [ -f .CHECKED ] && rm -f -- .CHECKED exit $? 
# TODO remove when rpm<4.14 compatibility irrelevant %install # skip automake-native Python byte-compilation, since RPM-native one (possibly # distro-confined to Python-specific directories, which is currently the only # relevant place, anyway) assures proper intrinsic alignment with wider system # (such as with py_byte_compile macro, which is concurrent Fedora/EL specific) make install \ DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf %endif %if %{defined _unitdir} mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif # Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # For now, don't package the servicelog-related binaries built only for # ppc64le when certain dependencies are installed. If they get more exercise by # advanced users, we can reconsider. rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif # Byte-compile Python sources where suitable and the distro procedures known %if %{defined py_byte_compile} %{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests} %if !%{defined _python_bytecompile_extra} %{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts} %endif %endif %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . -name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %post %if %{defined _unitdir} %systemd_post pacemaker.service %else /sbin/chkconfig --add pacemaker || : %endif %preun %if %{defined _unitdir} %systemd_preun pacemaker.service %else /sbin/service pacemaker stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker || : fi %endif %postun %if %{defined _unitdir} %systemd_postun_with_restart pacemaker.service %endif %pre remote %if %{defined _unitdir} # Stop the service before anything is touched, and remember to restart # it as one of the last actions (compared to using systemd_postun_with_restart, # this avoids suicide when sbd is in use) systemctl --quiet is-active pacemaker_remote if [ $? 
-eq 0 ] ; then mkdir -p %{_localstatedir}/lib/rpm-state/%{name} touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote systemctl stop pacemaker_remote >/dev/null 2>&1 else rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post remote %if %{defined _unitdir} %systemd_post pacemaker_remote.service %else /sbin/chkconfig --add pacemaker_remote || : %endif %preun remote %if %{defined _unitdir} %systemd_preun pacemaker_remote.service %else /sbin/service pacemaker_remote stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker_remote || : fi %endif %postun remote %if %{defined _unitdir} # This next line is a no-op, because we stopped the service earlier, but # we leave it here because it allows us to revert to the standard behavior # in the future if desired %systemd_postun_with_restart pacemaker_remote.service # Explicitly take care of removing the flag-file(s) upon final removal if [ "$1" -eq 0 ] ; then rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %posttrans remote %if %{defined _unitdir} if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then systemctl start pacemaker_remote >/dev/null 2>&1 rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post cli %if %{defined _unitdir} %systemd_post crm_mon.service %endif if [ "$1" -eq 2 ]; then # Package upgrade, not initial install: # Move any pre-2.0 logs to new location to ensure they get rotated { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \ || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker } >/dev/null 2>/dev/null || : fi %preun cli %if %{defined _unitdir} %systemd_preun crm_mon.service %endif %postun cli %if %{defined _unitdir} %systemd_postun_with_restart crm_mon.service %endif %pre -n %{pkgname_pcmk_libs} getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id} getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname} exit 0 %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n %{pkgname_pcmk_libs} %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %postun -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %post cluster-libs -p /sbin/ldconfig %postun cluster-libs -p /sbin/ldconfig %endif %files ########################################################### %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %if %{defined _unitdir} %{_unitdir}/pacemaker.service %else %{_initrddir}/pacemaker %endif %exclude %{_libexecdir}/pacemaker/cts-log-watcher %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted %if %{with legacy_links} %exclude %{_sbindir}/pacemaker_remoted %endif %{_libexecdir}/pacemaker/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/fence_legacy %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_o2cb.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_legacy.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_datadir}/pacemaker/alerts %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) 
%{_var}/lib/pacemaker/pengine /usr/lib/ocf/resource.d/pacemaker/controld /usr/lib/ocf/resource.d/pacemaker/o2cb /usr/lib/ocf/resource.d/pacemaker/remote %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/pacemaker.conf %config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf %endif %files cli %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/crm_mon.conf %endif %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %if %{with cibsecrets} %{_sbindir}/cibsecret %endif %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource %{_sbindir}/crm_rule %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %{_sbindir}/stonith_admin # "dirname" is owned by -schemas, which is a prerequisite %{_datadir}/pacemaker/report.collector %{_datadir}/pacemaker/report.common # XXX "dirname" is not owned by any prerequisite %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude /usr/lib/ocf/resource.d/pacemaker/controld %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb %exclude /usr/lib/ocf/resource.d/pacemaker/remote %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/pacemaker-controld.* %exclude %{_mandir}/man7/pacemaker-schedulerd.* %exclude %{_mandir}/man7/pacemaker-fenced.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_legacy.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles %files -n %{pkgname_pcmk_libs} %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpacemaker.so.* %{_libdir}/libstonithd.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files cluster-libs %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files remote %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} # state directory is shared between the subpackets # let rpm take care of removing it once it isn't # referenced anymore and empty %ghost %dir %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker-remoted %if %{with legacy_links} %{_sbindir}/pacemaker_remoted %endif %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files doc %doc %{pcmk_docdir} %license licenses/CC-BY-SA-4.0 %files cts %{python_site}/cts %{_datadir}/pacemaker/tests %{_libexecdir}/pacemaker/cts-log-watcher 
%{_libexecdir}/pacemaker/cts-support %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files -n %{pkgname_pcmk_libs}-devel %{_includedir}/pacemaker %{_libdir}/*.so %if %{with coverage} %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files schemas %license licenses/GPLv2 %dir %{_datadir}/pacemaker %{_datadir}/pacemaker/*.rng %{_datadir}/pacemaker/*.xsl %{_datadir}/pacemaker/api %{_datadir}/pkgconfig/pacemaker-schemas.pc %changelog * PACKAGE_DATE ClusterLabs PACKAGE_VERSION-1 - See included ChangeLog file for details diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 9663e68b18..2fc9a865d0 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,2049 +1,2049 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources" enum rsc_command { cmd_none = 0, // No command option given (yet) cmd_ban, cmd_cleanup, cmd_clear, cmd_colocations, cmd_colocations_deep, cmd_cts, cmd_delete, cmd_delete_param, cmd_execute_agent, cmd_fail, cmd_get_param, cmd_get_property, cmd_list_active_ops, cmd_list_agents, cmd_list_all_ops, cmd_list_alternatives, cmd_list_instances, cmd_list_providers, cmd_list_resources, cmd_list_standards, cmd_locate, cmd_metadata, cmd_move, cmd_query_raw_xml, cmd_query_xml, cmd_refresh, cmd_restart, cmd_set_param, cmd_set_property, cmd_wait, cmd_why, }; struct { enum rsc_command rsc_cmd; // The crm_resource command to perform const char *attr_set_type; int cib_options; gboolean clear_expired; int find_flags; /* Flags to use when searching for resource */ gboolean force; gchar *host_uname; gchar *interval_spec; gchar *move_lifetime; gchar *operation; GHashTable *override_params; gchar *prop_id; char *prop_name; gchar *prop_set; gchar *prop_value; gboolean recursive; gchar **remainder; gboolean require_cib; // Whether command requires CIB connection gboolean require_crmd; /* whether command requires controller connection */ gboolean require_dataset; /* whether command requires populated dataset instance */ gboolean require_resource; /* whether command requires that resource be specified */ int resource_verbose; gchar *rsc_id; gchar *rsc_type; gboolean promoted_role_only; int timeout_ms; char *agent_spec; // Standard and/or provider and/or agent char *v_agent; char *v_class; char *v_provider; gboolean validate_cmdline; /* whether we are just validating based on command line options */ GHashTable *validate_options; gchar *xml_file; } options = { .attr_set_type = XML_TAG_ATTR_SETS, .cib_options = cib_sync_call, .require_cib = TRUE, .require_dataset = TRUE, .require_resource = TRUE, }; #if 0 // @COMPAT @TODO enable this at next backward compatibility break #define SET_COMMAND(cmd) do { \ if (options.rsc_cmd != cmd_none) { \ g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, \ "Only one command option may be specified"); \ return FALSE; \ } \ options.rsc_cmd = (cmd); \ } while (0) #else #define SET_COMMAND(cmd) do { options.rsc_cmd = (cmd); } while (0) #endif gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); 
gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); static crm_exit_t exit_code = CRM_EX_OK; static pcmk__output_t *out = NULL; // Things that should be cleaned up on exit static GError *error = NULL; static GMainLoop *mainloop = NULL; static cib_t *cib_conn = NULL; static pcmk_ipc_api_t *controld_api = NULL; static pe_working_set_t *data_set = NULL; #define MESSAGE_TIMEOUT_S 60 #define INDENT " " static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; // Clean up and exit static crm_exit_t bye(crm_exit_t ec) { if (error != NULL) { if (out != NULL) { out->err(out, "%s: %s", g_get_prgname(), error->message); } else { fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message); } g_clear_error(&error); } if (out != NULL) { out->finish(out, ec, true, NULL); pcmk__output_free(out); } if (cib_conn != NULL) { cib_t *save_cib_conn = cib_conn; cib_conn = NULL; // Ensure we can't free this twice save_cib_conn->cmds->signoff(save_cib_conn); cib_delete(save_cib_conn); } if (controld_api != NULL) { pcmk_ipc_api_t *save_controld_api = controld_api; controld_api = NULL; // Ensure we can't free this twice pcmk_free_ipc_api(save_controld_api); } if (mainloop != NULL) { g_main_loop_unref(mainloop); mainloop = NULL; } pe_free_working_set(data_set); data_set = NULL; 
crm_exit(ec); return ec; } static void quit_main_loop(crm_exit_t ec) { exit_code = ec; if (mainloop != NULL) { GMainLoop *mloop = mainloop; mainloop = NULL; // Don't re-enter this block pcmk_quit_main_loop(mloop, 10); g_main_loop_unref(mloop); } } static gboolean resource_ipc_timeout(gpointer data) { // Start with newline because "Waiting for ..." message doesn't have one if (error != NULL) { g_clear_error(&error); } g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT, "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S); quit_main_loop(CRM_EX_TIMEOUT); return FALSE; } static void controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { switch (event_type) { case pcmk_ipc_event_disconnect: if (exit_code == CRM_EX_DISCONNECT) { // Unexpected crm_info("Connection to controller was terminated"); } quit_main_loop(exit_code); break; case pcmk_ipc_event_reply: if (status != CRM_EX_OK) { out->err(out, "Error: bad reply from controller: %s", crm_exit_str(status)); pcmk_disconnect_ipc(api); quit_main_loop(status); } else { if ((pcmk_controld_api_replies_expected(api) == 0) && mainloop && g_main_loop_is_running(mainloop)) { out->info(out, "... got reply (done)"); crm_debug("Got all the replies we expected"); pcmk_disconnect_ipc(api); quit_main_loop(CRM_EX_OK); } else { out->info(out, "... got reply"); } } break; default: break; } } static void start_mainloop(pcmk_ipc_api_t *capi) { unsigned int count = pcmk_controld_api_replies_expected(capi); if (count > 0) { out->info(out, "Waiting for %d %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects mainloop = g_main_loop_new(NULL, FALSE); g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } } static int compare_id(gconstpointer a, gconstpointer b) { return strcmp((const char *)a, (const char *)b); } static GListPtr build_constraint_list(xmlNode *root) { GListPtr retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObjectPtr xpathObj = NULL; int ndx = 0; cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root); xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION); for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) { xmlNode *match = getXpathResult(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id); } freeXpathObject(xpathObj); return retval; } /* short option letters still available: eEJkKXyYZ */ static GOptionEntry query_entries[] = { { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List all cluster resources with status", NULL }, { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List IDs of all instantiated resources (individual members\n" INDENT "rather than groups etc.)", NULL }, { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, NULL, NULL }, { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List active resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List all resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_standards_cb, "List supported standards", NULL }, { "list-ocf-providers", 0, 
G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_providers_cb, "List all available OCF providers", NULL }, { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, list_agents_cb, "List all agents available for the named standard and/or provider", "STD/PROV" }, { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, list_alternatives_cb, "List all available providers for the named OCF agent", "AGENT" }, { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, metadata_cb, "Show the metadata for the named class:provider:agent", "SPEC" }, { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show XML configuration of resource (after any template expansion)", NULL }, { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show XML configuration of resource (before any template expansion)", NULL }, { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb, "Display named parameter for resource (use instance attribute\n" INDENT "unless --meta or --utilization is specified)", "PARAM" }, { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb, "Display named property of resource ('class', 'type', or 'provider') " "(requires --resource)", "PROPERTY" }, { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show node(s) currently running resource", NULL }, { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Display the (co)location constraints that apply to a resource\n" INDENT "and the resources it is colocated with", NULL }, { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Display the (co)location constraints that apply to a resource", NULL }, { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb, "Show why resources are not running, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { NULL } }; static GOptionEntry command_entries[] = { { "validate", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "Validate resource configuration by calling agent's validate-all\n" INDENT "action. The configuration may be specified either by giving an\n" INDENT "existing resource name with -r, or by specifying --class,\n" INDENT "--agent, and --provider arguments, along with any number of\n" INDENT "--option arguments.", NULL }, { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, "If resource has any past failures, clear its history and fail\n" INDENT "count. Optionally filtered by --resource, --node, --operation\n" INDENT "and --interval (otherwise all). --operation and --interval\n" INDENT "apply to fail counts, but entire history is always cleared, to\n" INDENT "allow current state to be rechecked. If the named resource is\n" INDENT "part of a group, or one numbered instance of a clone or bundled\n" INDENT "resource, the clean-up applies to the whole collective resource\n" INDENT "unless --force is given.", NULL }, { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, "Delete resource's history (including failures) so its current state\n" INDENT "is rechecked. Optionally filtered by --resource and --node\n" INDENT "(otherwise all). 
If the named resource is part of a group, or one\n" INDENT "numbered instance of a clone or bundled resource, the refresh\n" INDENT "applies to the whole collective resource unless --force is given.", NULL }, { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb, "Set named parameter for resource (requires -v). Use instance\n" INDENT "attribute unless --meta or --utilization is specified.", "PARAM" }, { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb, "Delete named parameter for resource. Use instance attribute\n" INDENT "unless --meta or --utilization is specified.", "PARAM" }, { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb, "Set named property of resource ('class', 'type', or 'provider') " "(requires -r, -t, -v)", "PROPERTY" }, { NULL } }; static GOptionEntry location_entries[] = { { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Create a constraint to move resource. If --node is specified,\n" INDENT "the constraint will be to move to that node, otherwise it\n" INDENT "will be to ban the current node. Unless --force is specified\n" INDENT "this will return an error if the resource is already running\n" INDENT "on the specified node. If --force is specified, this will\n" INDENT "always ban the current node.\n" INDENT "Optional: --lifetime, --master. NOTE: This may prevent the\n" INDENT "resource from running on its previous location until the\n" INDENT "implicit constraint expires or is removed with --clear.", NULL }, { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Create a constraint to keep resource off a node.\n" INDENT "Optional: --node, --lifetime, --master.\n" INDENT "NOTE: This will prevent the resource from running on the\n" INDENT "affected node until the implicit constraint expires or is\n" INDENT "removed with --clear. If --node is not specified, it defaults\n" INDENT "to the node currently running the resource for primitives\n" INDENT "and groups, or the master for promotable clones with\n" INDENT "promoted-max=1 (all other situations result in an error as\n" INDENT "there is no sane default).", NULL }, { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Remove all constraints created by the --ban and/or --move\n" INDENT "commands. Requires: --resource. Optional: --node, --master,\n" INDENT "--expired. If --node is not specified, all constraints created\n" INDENT "by --ban and --move will be removed for the named resource. If\n" INDENT "--node and --force are specified, any constraint created by\n" INDENT "--move will be cleared, even if it is not for the specified\n" INDENT "node. If --expired is specified, only those constraints whose\n" INDENT "lifetimes have expired will be removed.", NULL }, { "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb, "Modifies the --clear argument to remove constraints with\n" INDENT "expired lifetimes.", NULL }, { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime, "Lifespan (as ISO 8601 duration) of created constraints (with\n" INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)", "TIMESPEC" }, { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Limit scope of command to Master role (with -B, -M, -U). 
For\n" INDENT "-B and -M the previous master may remain active in the Slave role.", NULL }, { NULL } }; static GOptionEntry advanced_entries[] = { { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb, "(Advanced) Delete a resource from the CIB. Required: -t", NULL }, { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb, "(Advanced) Tell the cluster this resource has failed", NULL }, { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb, "(Advanced) Tell the cluster to restart this resource and\n" INDENT "anything that depends on it", NULL }, { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb, "(Advanced) Wait until the cluster settles into a stable state", NULL }, { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and demote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and stop a resource on the local node", NULL }, { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and start a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and promote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-check", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and check the state of a resource on\n" INDENT "the local node", NULL }, { NULL } }; static GOptionEntry validate_entries[] = { { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb, "The standard the resource agent confirms to (for example, ocf).\n" INDENT "Use with --agent, --provider, --option, and --validate.", "CLASS" }, { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, "The agent to use (for example, IPaddr). Use with --class,\n" INDENT "--provider, --option, and --validate.", "AGENT" }, { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, "The vendor that supplies the resource agent (for example,\n" INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", "PROVIDER" }, { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, "Specify a device configuration parameter as NAME=VALUE (may be\n" INDENT "specified multiple times). Use with --validate and without the\n" INDENT "-r option.", "PARAM" }, { NULL } }; static GOptionEntry addl_entries[] = { { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname, "Node name", "NAME" }, { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive, "Follow colocation chains when using --set-parameter", NULL }, { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type, "Resource XML element (primitive, group, etc.) 
(with -D)", "ELEMENT" }, { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value, "Value to use with -p", "PARAM" }, { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource meta-attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource utilization attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation, "Operation to clear instead of all (with -C -r)", "OPERATION" }, { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec, "Interval of operation to clear (default 0) (with -C -r -n)", "N" }, { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set, "(Advanced) XML ID of attributes element to use (with -p, -d)", "ID" }, { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id, "(Advanced) XML ID of nvpair element to use (with -p, -d)", "ID" }, { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb, "(Advanced) Abort if command does not finish in this time (with\n" INDENT "--restart, --wait, --force-*)", "N" }, { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force, "If making CIB changes, do so regardless of quorum. See help for\n" INDENT "individual commands for additional behavior.", NULL }, { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file, NULL, "FILE" }, { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname, NULL, "HOST" }, { NULL } }; gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.validate_cmdline = TRUE; options.require_resource = FALSE; if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) { if (options.v_provider) { free(options.v_provider); } options.v_provider = strdup(optarg); } else { if (options.v_agent) { free(options.v_agent); } options.v_agent = strdup(optarg); } return TRUE; } gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) { options.attr_set_type = XML_TAG_META_SETS; } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) { options.attr_set_type = XML_TAG_UTILIZATION; } return TRUE; } gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) { if (!out->is_quiet(out)) { g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, "Standard %s does not support parameters\n", optarg); } return FALSE; } else { if (options.v_class != NULL) { free(options.v_class); } options.v_class = strdup(optarg); } options.validate_cmdline = TRUE; options.require_resource = FALSE; return TRUE; } gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) { SET_COMMAND(cmd_cleanup); } else { SET_COMMAND(cmd_refresh); } options.require_resource = FALSE; if (getenv("CIB_file") == NULL) { options.require_crmd = TRUE; } options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_dataset = FALSE; SET_COMMAND(cmd_delete); options.find_flags 
= pe_find_renamed|pe_find_any; return TRUE; } gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.clear_expired = TRUE; options.require_resource = FALSE; return TRUE; } static void get_agent_spec(const gchar *optarg) { options.require_cib = FALSE; options.require_dataset = FALSE; options.require_resource = FALSE; if (options.agent_spec != NULL) { free(options.agent_spec); options.agent_spec = NULL; } if (optarg != NULL) { options.agent_spec = strdup(optarg); } } gboolean list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_agents); get_agent_spec(optarg); return TRUE; } gboolean list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_providers); get_agent_spec(optarg); return TRUE; } gboolean list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_standards); options.require_cib = FALSE; options.require_dataset = FALSE; options.require_resource = FALSE; return TRUE; } gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_alternatives); get_agent_spec(optarg); return TRUE; } gboolean metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_metadata); get_agent_spec(optarg); return TRUE; } gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { char *name = NULL; char *value = NULL; if (pcmk_scan_nvpair(optarg, &name, &value) != 2) { return FALSE; } if (options.validate_options == NULL) { options.validate_options = crm_str_table_new(); } g_hash_table_replace(options.validate_options, name, value); return TRUE; } gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_crmd = TRUE; SET_COMMAND(cmd_fail); return TRUE; } gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_clear); } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_ban); } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_move); } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) { options.find_flags = pe_find_renamed|pe_find_any; SET_COMMAND(cmd_query_xml); } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) { options.find_flags = pe_find_renamed|pe_find_any; SET_COMMAND(cmd_query_raw_xml); } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_locate); } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_colocations_deep); } else { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_colocations); } return TRUE; } gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) { SET_COMMAND(cmd_get_param); } else { SET_COMMAND(cmd_get_property); } if (options.prop_name) { free(options.prop_name); } options.prop_name = 
strdup(optarg); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) { SET_COMMAND(cmd_cts); } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) { SET_COMMAND(cmd_list_resources); } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) { SET_COMMAND(cmd_list_instances); } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) { SET_COMMAND(cmd_list_active_ops); } else { SET_COMMAND(cmd_list_all_ops); } options.require_resource = FALSE; return TRUE; } gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) { SET_COMMAND(cmd_set_param); } else { SET_COMMAND(cmd_delete_param); } if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_dataset = FALSE; if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); SET_COMMAND(cmd_set_property); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.timeout_ms = crm_get_msec(optarg); return TRUE; } gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_execute_agent); if (options.operation) { g_free(options.operation); } options.operation = g_strdup(option_name + 2); // skip "--" options.find_flags = pe_find_renamed|pe_find_anon; options.override_params = crm_str_table_new(); return TRUE; } gboolean restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_restart); options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_wait); options.require_resource = FALSE; options.require_dataset = FALSE; return TRUE; } gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_resource = FALSE; SET_COMMAND(cmd_why); options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } static int ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; pe_node_t *current = NULL; unsigned int nactive = 0; CRM_CHECK(rsc != NULL, return EINVAL); current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { int count = 0; GListPtr iter = NULL; current = NULL; for(iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = (pe_resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { count++; current = pe__current_node(child); } } if(count == 1 && current) { rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } else { rc = EINVAL; *exit_code = 
CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Resource '%s' not moved: active in %d locations (promoted in %d).\n" "To prevent '%s' from running on a specific location, " "specify a node." "To prevent '%s' from being promoted at a specific " "location, specify a node and the master option.", options.rsc_id, nactive, count, options.rsc_id, options.rsc_id); } } else { rc = EINVAL; *exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Resource '%s' not moved: active in %d locations.\n" "To prevent '%s' from running on a specific location, " "specify a node.", options.rsc_id, nactive, options.rsc_id); } return rc; } static void cleanup(pcmk__output_t *out, pe_resource_t *rsc) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation, options.interval_spec, TRUE, data_set, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, cib_conn, rsc); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } static int clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) { GListPtr before = NULL; GListPtr after = NULL; GListPtr remaining = NULL; GListPtr ele = NULL; pe_node_t *dest = NULL; int rc = pcmk_rc_ok; if (!out->is_quiet(out)) { before = build_constraint_list(data_set->input); } if (options.clear_expired) { rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options, options.rsc_id, options.host_uname, options.promoted_role_only); } else if (options.host_uname) { dest = pe_find_node(data_set->nodes, options.host_uname); if (dest == NULL) { rc = pcmk_rc_node_unknown; if (!out->is_quiet(out)) { g_list_free(before); } return rc; } rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL, cib_conn, options.cib_options, TRUE, options.force); } else { rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes, cib_conn, options.cib_options, TRUE, options.force); } if (!out->is_quiet(out)) { rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Could not get modified CIB: %s\n", pcmk_strerror(rc)); g_list_free(before); return rc; } data_set->input = *cib_xml_copy; cluster_status(data_set); after = build_constraint_list(data_set->input); remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); for (ele = remaining; ele != NULL; ele = ele->next) { out->info(out, "Removing constraint: %s", (char *) ele->data); } g_list_free(before); g_list_free(after); g_list_free(remaining); } return rc; } static int -delete() +delete(void) { int rc = pcmk_rc_ok; xmlNode *msg_data = NULL; if (options.rsc_type == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "You need to specify a resource type with -t"); return rc; } msg_data = create_xml_node(NULL, options.rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, options.cib_options); rc = pcmk_legacy2rc(rc); free_xml(msg_data); return rc; } static int list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; char *provider = strchr(agent_spec, ':'); lrmd_t *lrmd_conn = 
lrmd_api_new(); lrmd_list_t *list = NULL; if (provider) { *provider++ = 0; } rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider); if (rc > 0) { rc = out->message(out, "agents-list", list, agent_spec, provider); } else { rc = pcmk_rc_error; } if (rc != pcmk_rc_ok) { *exit_code = CRM_EX_NOSUCH; if (provider == NULL) { g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No agents found for standard '%s'", agent_spec); } else { g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No agents found for standard '%s' and provider '%s'", agent_spec, provider); } } lrmd_api_delete(lrmd_conn); return rc; } static int list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc; const char *text = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); lrmd_list_t *list = NULL; switch (options.rsc_cmd) { case cmd_list_alternatives: rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); if (rc > 0) { rc = out->message(out, "alternatives-list", list, agent_spec); } else { rc = pcmk_rc_error; } text = "OCF providers"; break; case cmd_list_standards: rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); if (rc > 0) { rc = out->message(out, "standards-list", list); } else { rc = pcmk_rc_error; } text = "standards"; break; case cmd_list_providers: rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); if (rc > 0) { rc = out->message(out, "providers-list", list, agent_spec); } else { rc = pcmk_rc_error; } text = "OCF providers"; break; default: *exit_code = CRM_EX_SOFTWARE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Bug"); lrmd_api_delete(lrmd_conn); return pcmk_rc_error; } if (rc != pcmk_rc_ok) { if (agent_spec != NULL) { *exit_code = CRM_EX_NOSUCH; rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No %s found for %s", text, agent_spec); } else { *exit_code = CRM_EX_NOSUCH; rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No %s found", text); } } lrmd_api_delete(lrmd_conn); return rc; } static int populate_working_set(xmlNodePtr *cib_xml_copy) { int rc = pcmk_rc_ok; if (options.xml_file != NULL) { *cib_xml_copy = filename2xml(options.xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); } if(rc != pcmk_rc_ok) { return rc; } /* Populate the working set instance */ data_set = pe_new_working_set(); if (data_set == NULL) { rc = ENOMEM; return rc; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); rc = update_working_set_xml(data_set, cib_xml_copy); if (rc == pcmk_rc_ok) { cluster_status(data_set); } return rc; } static int refresh(pcmk__output_t *out) { int rc = pcmk_rc_ok; const char *router_node = options.host_uname; int attr_options = pcmk__node_attr_none; if (options.host_uname) { pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname); if (pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "No cluster connection to Pacemaker Remote node %s detected", options.host_uname); return rc; } router_node = node->details->uname; attr_options |= pcmk__node_attr_remote; } } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", options.host_uname? 
options.host_uname : "all nodes"); rc = pcmk_rc_ok; return rc; } crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes"); rc = pcmk__node_attr_request_clear(NULL, options.host_uname, NULL, NULL, NULL, NULL, attr_options); if (pcmk_controld_api_reprobe(controld_api, options.host_uname, router_node) == pcmk_rc_ok) { start_mainloop(controld_api); } return rc; } static void refresh_resource(pcmk__output_t *out, pe_resource_t *rsc) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL, 0, FALSE, data_set, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, cib_conn, rsc); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } static int -set_property() +set_property(void) { int rc = pcmk_rc_ok; xmlNode *msg_data = NULL; if (pcmk__str_empty(options.rsc_type)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must specify -t with resource type"); rc = ENXIO; return rc; } else if (pcmk__str_empty(options.prop_value)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must supply -v with new value"); rc = EINVAL; return rc; } CRM_LOG_ASSERT(options.prop_name != NULL); msg_data = create_xml_node(NULL, options.rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id); crm_xml_add(msg_data, options.prop_name, options.prop_value); rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, options.cib_options); rc = pcmk_legacy2rc(rc); free_xml(msg_data); return rc; } static int show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type, &metadata, 0); rc = pcmk_legacy2rc(rc); if (metadata) { out->output_xml(out, "metadata", metadata); } else { *exit_code = crm_errno2exit(rc); g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Metadata query for %s failed: %s", agent_spec, pcmk_rc_str(rc)); } } else { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "'%s' is not a valid agent specification", agent_spec); } lrmd_api_delete(lrmd_conn); return rc; } static void validate_cmdline(crm_exit_t *exit_code) { // -r cannot be used with any of --class, --agent, or --provider if (options.rsc_id != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--resource cannot be used with --class, --agent, and --provider"); // If --class, --agent, or --provider are given, --validate must also be given. } else if (options.rsc_cmd != cmd_execute_agent) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--class, --agent, and --provider require --validate"); // Not all of --class, --agent, and --provider need to be given. Not all // classes support the concept of a provider. Check that what we were given // is valid. 
} else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) { if (options.v_provider != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "stonith does not support providers"); } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s is not a known stonith agent", options.v_agent ? options.v_agent : ""); } } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s:%s:%s is not a known resource", options.v_class ? options.v_class : "", options.v_provider ? options.v_provider : "", options.v_agent ? options.v_agent : ""); } if (error == NULL) { if (options.validate_options == NULL) { options.validate_options = crm_str_table_new(); } *exit_code = cli_resource_execute_from_params(out, "test", options.v_class, options.v_provider, options.v_agent, "validate-all", options.validate_options, options.override_params, options.timeout_ms, options.resource_verbose, options.force); } } static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; GOptionEntry extra_prog_entries[] = { { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet), "Be less descriptive in output.", NULL }, { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id, "Resource ID", "ID" }, { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder, NULL, NULL }, { NULL } }; const char *description = "Examples:\n\n" "List the available OCF agents:\n\n" "\t# crm_resource --list-agents ocf\n\n" "List the available OCF agents from the linux-ha project:\n\n" "\t# crm_resource --list-agents ocf:heartbeat\n\n" "Move 'myResource' to a specific node:\n\n" "\t# crm_resource --resource myResource --move --node altNode\n\n" "Allow (but not force) 'myResource' to move back to its original " "location:\n\n" "\t# crm_resource --resource myResource --clear\n\n" "Stop 'myResource' (and anything that depends on it):\n\n" "\t# crm_resource --resource myResource --set-parameter target-role " "--meta --parameter-value Stopped\n\n" "Tell the cluster not to manage 'myResource' (the cluster will not " "attempt to start or stop the\n" "resource under any circumstances; useful when performing maintenance " "tasks on a resource):\n\n" "\t# crm_resource --resource myResource --set-parameter is-managed " "--meta --parameter-value false\n\n" "Erase the operation history of 'myResource' on 'aNode' (the cluster " "will 'forget' the existing\n" "resource state, including any errors, and attempt to recover the " "resource; useful when a resource\n" "had failed permanently and has been repaired by an administrator):\n\n" "\t# crm_resource --resource myResource --cleanup --node aNode\n\n"; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); g_option_context_set_description(context, description); /* Add the -Q option, which cannot be part of the globally supported options * because some tools use that flag for something else. 
*/ pcmk__add_main_args(context, extra_prog_entries); pcmk__add_arg_group(context, "queries", "Queries:", "Show query help", query_entries); pcmk__add_arg_group(context, "commands", "Commands:", "Show command help", command_entries); pcmk__add_arg_group(context, "locations", "Locations:", "Show location help", location_entries); pcmk__add_arg_group(context, "validate", "Validate:", "Show validate help", validate_entries); pcmk__add_arg_group(context, "advanced", "Advanced:", "Show advanced option help", advanced_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { xmlNode *cib_xml_copy = NULL; pe_resource_t *rsc = NULL; int rc = pcmk_rc_ok; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); GOptionContext *context = NULL; GOptionGroup *output_group = NULL; gchar **processed_args = NULL; context = build_arg_context(args, &output_group); pcmk__register_formats(output_group, formats); crm_log_cli_init("crm_resource"); processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv"); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } for (int i = 0; i < args->verbosity; i++) { crm_bump_log_level(argc, argv); } rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { fprintf(stderr, "Error creating output format %s: %s\n", args->output_ty, pcmk_rc_str(rc)); exit_code = CRM_EX_ERROR; goto done; } options.resource_verbose = args->verbosity; out->quiet = args->quiet; crm_log_args(argc, argv); if (options.host_uname) { crm_trace("Option host => %s", options.host_uname); } // If the user didn't explicitly specify a command, list resources if (options.rsc_cmd == cmd_none) { options.rsc_cmd = cmd_list_resources; options.require_resource = FALSE; } // --expired without --clear/-U doesn't make sense if (options.clear_expired && (options.rsc_cmd != cmd_clear)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--expired requires --clear or -U"); goto done; } if ((options.remainder != NULL) && (options.override_params != NULL)) { // Commands that use positional arguments will create override_params for (gchar **s = options.remainder; *s; s++) { char *name = calloc(1, strlen(*s)); char *value = calloc(1, strlen(*s)); int rc = sscanf(*s, "%[^=]=%s", name, value); if (rc == 2) { g_hash_table_replace(options.override_params, name, value); } else { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Error parsing '%s' as a name=value pair", argv[optind]); free(value); free(name); goto done; } } } else if (options.remainder != NULL) { gchar **strv = NULL; gchar *msg = NULL; int i = 1; int len = 0; for (gchar **s = options.remainder; *s; s++) { len++; } CRM_ASSERT(len > 0); strv = calloc(len, sizeof(char *)); strv[0] = strdup("non-option ARGV-elements:"); for (gchar **s = options.remainder; *s; s++) { strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s); i++; } msg = g_strjoinv("", strv); g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s", msg); g_free(msg); for(i = 0; i < len; i++) { free(strv[i]); } free(strv); goto done; } if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) { /* Kind of a hack to display XML lists using a real tag instead of . 
This just * saves from having to write custom messages to build the lists around all these things */ switch (options.rsc_cmd) { case cmd_list_resources: case cmd_query_xml: case cmd_query_raw_xml: case cmd_list_active_ops: case cmd_list_all_ops: case cmd_colocations: case cmd_colocations_deep: pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname()); break; default: pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname()); break; } } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep || options.rsc_cmd == cmd_list_resources) { pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname()); } } pe__register_messages(out); crm_resource_register_messages(out); lrmd__register_messages(out); pcmk__register_lib_messages(out); if (args->version) { out->version(out, false); goto done; } if (optind > argc) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Invalid option(s) supplied, use --help for valid usage"); exit_code = CRM_EX_USAGE; goto done; } // Sanity check validating from command line parameters. If everything checks out, // go ahead and run the validation. This way we don't need a CIB connection. if (options.validate_cmdline) { validate_cmdline(&exit_code); goto done; } else if (options.validate_options != NULL) { // @COMPAT @TODO error out here when we can break backward compatibility g_hash_table_destroy(options.validate_options); options.validate_options = NULL; } if (error != NULL) { exit_code = CRM_EX_USAGE; goto done; } if (options.force) { crm_debug("Forcing..."); cib__set_call_options(options.cib_options, crm_system_name, cib_quorum_override); } if (options.require_resource && !options.rsc_id) { rc = ENXIO; g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must supply a resource id with -r"); goto done; } if (options.find_flags && options.rsc_id) { options.require_dataset = TRUE; } // Establish a connection to the CIB if needed if (options.require_cib) { cib_conn = cib_new(); if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) { rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_DISCONNECT, "Could not create CIB connection"); goto done; } rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Could not connect to the CIB: %s", pcmk_rc_str(rc)); goto done; } } /* Populate working set from XML file if specified or CIB query otherwise */ if (options.require_dataset) { rc = populate_working_set(&cib_xml_copy); if (rc != pcmk_rc_ok) { goto done; } } // If command requires that resource exist if specified, find it if (options.find_flags && options.rsc_id) { rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id, options.find_flags); if (rsc == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "Resource '%s' not found", options.rsc_id); goto done; } } // Establish a connection to the controller if needed if (options.require_crmd) { rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Error connecting to the controller: %s", pcmk_rc_str(rc)); goto done; } pcmk_register_ipc_callback(controld_api, controller_event_callback, NULL); rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Error connecting to the controller: %s", 
pcmk_rc_str(rc)); goto done; } } switch (options.rsc_cmd) { case cmd_list_resources: { GListPtr all = NULL; all = g_list_prepend(all, strdup("*")); rc = out->message(out, "resource-list", data_set, pe_print_rsconly | pe_print_pending, FALSE, TRUE, FALSE, TRUE, all, all, FALSE); g_list_free_full(all, free); if (rc == pcmk_rc_no_output) { rc = ENXIO; } break; } case cmd_list_instances: rc = out->message(out, "resource-names-list", data_set->resources); if (rc != pcmk_rc_ok) { rc = ENXIO; } break; case cmd_list_standards: case cmd_list_providers: case cmd_list_alternatives: rc = list_providers(out, options.agent_spec, &exit_code); break; case cmd_list_agents: rc = list_agents(out, options.agent_spec, &exit_code); break; case cmd_metadata: rc = show_metadata(out, options.agent_spec, &exit_code); break; case cmd_restart: /* We don't pass data_set because rsc needs to stay valid for the * entire lifetime of cli_resource_restart(), but it will reset and * update the working set multiple times, so it needs to use its own * copy. */ rc = cli_resource_restart(out, rsc, options.host_uname, options.move_lifetime, options.timeout_ms, cib_conn, options.cib_options, options.promoted_role_only, options.force); break; case cmd_wait: rc = wait_till_stable(out, options.timeout_ms, cib_conn); break; case cmd_execute_agent: exit_code = cli_resource_execute(out, rsc, options.rsc_id, options.operation, options.override_params, options.timeout_ms, cib_conn, data_set, options.resource_verbose, options.force); break; case cmd_colocations: rc = out->message(out, "stacks-constraints", rsc, data_set, false); break; case cmd_colocations_deep: rc = out->message(out, "stacks-constraints", rsc, data_set, true); break; case cmd_cts: rc = pcmk_rc_ok; for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { rsc = (pe_resource_t *) lpc->data; cli_resource_print_cts(out, rsc); } cli_resource_print_cts_constraints(out, data_set); break; case cmd_fail: rc = cli_resource_fail(out, controld_api, options.host_uname, options.rsc_id, data_set); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } break; case cmd_list_active_ops: rc = cli_resource_print_operations(out, options.rsc_id, options.host_uname, TRUE, data_set); break; case cmd_list_all_ops: rc = cli_resource_print_operations(out, options.rsc_id, options.host_uname, FALSE, data_set); break; case cmd_locate: { GListPtr resources = cli_resource_search(out, rsc, options.rsc_id, data_set); rc = out->message(out, "resource-search-list", resources, rsc, options.rsc_id); break; } case cmd_query_xml: rc = cli_resource_print(out, rsc, data_set, TRUE); break; case cmd_query_raw_xml: rc = cli_resource_print(out, rsc, data_set, FALSE); break; case cmd_why: { pe_node_t *dest = NULL; if (options.host_uname) { dest = pe_find_node(data_set->nodes, options.host_uname); if (dest == NULL) { rc = pcmk_rc_node_unknown; goto done; } } out->message(out, "resource-reasons-list", cib_conn, data_set->resources, rsc, dest); rc = pcmk_rc_ok; } break; case cmd_clear: rc = clear_constraints(out, &cib_xml_copy); break; case cmd_move: if (options.host_uname == NULL) { rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); } else { rc = cli_resource_move(out, rsc, options.rsc_id, options.host_uname, options.move_lifetime, cib_conn, options.cib_options, data_set, options.promoted_role_only, options.force); } break; case cmd_ban: if (options.host_uname == NULL) { rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code); } else { pe_node_t *dest = pe_find_node(data_set->nodes, 
options.host_uname); if (dest == NULL) { rc = pcmk_rc_node_unknown; goto done; } rc = cli_resource_ban(out, options.rsc_id, dest->details->uname, options.move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } break; case cmd_get_property: rc = out->message(out, "property-list", rsc, options.prop_name); if (rc == pcmk_rc_no_output) { rc = ENXIO; } break; case cmd_set_property: rc = set_property(); break; case cmd_get_param: { unsigned int count = 0; GHashTable *params = NULL; pe_node_t *current = pe__find_active_on(rsc, &count, NULL); bool free_params = true; if (count > 1) { out->err(out, "%s is active on more than one node," " returning the default value for %s", rsc->id, crm_str(options.prop_name)); current = NULL; } crm_debug("Looking up %s in %s", options.prop_name, rsc->id); if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { params = pe_rsc_params(rsc, current, data_set); free_params = false; } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { params = crm_str_table_new(); get_meta_attributes(params, rsc, current, data_set); } else { params = crm_str_table_new(); pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params, NULL, FALSE, data_set); } rc = out->message(out, "attribute-list", rsc, options.prop_name, params); if (free_params) { g_hash_table_destroy(params); } break; } case cmd_set_param: if (pcmk__str_empty(options.prop_value)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "You need to supply a value with the -v option"); rc = EINVAL; goto done; } /* coverity[var_deref_model] False positive */ rc = cli_resource_update_attribute(out, rsc, options.rsc_id, options.prop_set, options.attr_set_type, options.prop_id, options.prop_name, options.prop_value, options.recursive, cib_conn, options.cib_options, data_set, options.force); break; case cmd_delete_param: /* coverity[var_deref_model] False positive */ rc = cli_resource_delete_attribute(out, rsc, options.rsc_id, options.prop_set, options.attr_set_type, options.prop_id, options.prop_name, cib_conn, options.cib_options, data_set, options.force); break; case cmd_cleanup: if (rsc == NULL) { rc = cli_cleanup_all(out, controld_api, options.host_uname, options.operation, options.interval_spec, data_set); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } else { cleanup(out, rsc); } break; case cmd_refresh: if (rsc == NULL) { rc = refresh(out); } else { refresh_resource(out, rsc); } break; case cmd_delete: rc = delete(); break; default: g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_SOFTWARE, "Unimplemented command: %d", (int) options.rsc_cmd); break; } done: if (rc != pcmk_rc_ok) { if (rc == pcmk_rc_no_quorum) { g_prefix_error(&error, "To ignore quorum, use the force option.\n"); } if (error != NULL) { char *msg = crm_strdup_printf("%s\nError performing operation: %s", error->message, pcmk_rc_str(rc)); g_clear_error(&error); g_set_error(&error, PCMK__RC_ERROR, rc, "%s", msg); free(msg); } else { g_set_error(&error, PCMK__RC_ERROR, rc, "Error performing operation: %s", pcmk_rc_str(rc)); } if (exit_code == CRM_EX_OK) { exit_code = pcmk_rc2exitc(rc); } } g_free(options.host_uname); g_free(options.interval_spec); g_free(options.move_lifetime); g_free(options.operation); g_free(options.prop_id); free(options.prop_name); g_free(options.prop_set); g_free(options.prop_value); g_free(options.rsc_id); g_free(options.rsc_type); free(options.agent_spec); free(options.v_agent); free(options.v_class); free(options.v_provider); 
g_free(options.xml_file); g_strfreev(options.remainder); if (options.override_params != NULL) { g_hash_table_destroy(options.override_params); } /* options.validate_options does not need to be destroyed here. See the * comments in cli_resource_execute_from_params. */ g_strfreev(processed_args); g_option_context_free(context); return bye(exit_code); } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index 9ff9e96dcd..3f28c7bcfa 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1,1945 +1,1942 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include resource_checks_t * cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed) { pe_resource_t *parent = uber_parent(rsc); resource_checks_t *rc = calloc(1, sizeof(resource_checks_t)); if (role_s) { enum rsc_role_e role = text2role(role_s); if (role == RSC_ROLE_STOPPED) { rc->flags |= rsc_remain_stopped; } else if (pcmk_is_set(parent->flags, pe_rsc_promotable) && role == RSC_ROLE_SLAVE) { rc->flags |= rsc_unpromotable; } } if (managed && !crm_is_true(managed)) { rc->flags |= rsc_unmanaged; } if (rsc->lock_node) { rc->lock_node = rsc->lock_node->details->uname; } rc->rsc = rsc; return rc; } GListPtr cli_resource_search(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, pe_working_set_t *data_set) { GListPtr found = NULL; pe_resource_t *parent = uber_parent(rsc); if (pe_rsc_is_clone(rsc)) { for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { GListPtr extra = ((pe_resource_t *) iter->data)->running_on; if (extra != NULL) { found = g_list_concat(found, extra); } } /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) && !pcmk_is_set(rsc->flags, pe_rsc_unique) && rsc->clone_name && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { for (GListPtr iter = parent->children; iter; iter = iter->next) { GListPtr extra = ((pe_resource_t *) iter->data)->running_on; if (extra != NULL) { found = g_list_concat(found, extra); } } } else if (rsc->running_on != NULL) { found = g_list_concat(found, rsc->running_on); } return found; } #define XPATH_MAX 1024 // \return Standard Pacemaker return code static int find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, const char *rsc, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int offset = 0; int rc = pcmk_rc_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; if(value) { *value = NULL; } if(the_cib == NULL) { return ENOTCONN; } xpath_string = calloc(1, XPATH_MAX); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources")); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc); if (attr_set_type) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", attr_set_type); if (set_name) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name); } } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair["); if (attr_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id); } if (attr_name) { if (attr_id) { offset += 
snprintf(xpath_string + offset, XPATH_MAX - offset, " and "); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]"); CRM_LOG_ASSERT(offset > 0); rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); if (xml_has_children(xml_search)) { xmlNode *child = NULL; rc = EINVAL; out->info(out, "Multiple attributes match name=%s", attr_name); for (child = pcmk__xml_first_child(xml_search); child != NULL; child = pcmk__xml_next(child)) { out->info(out, " Value: %s \t(id=%s)", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } out->spacer(out); } else if(value) { const char *tmp = crm_element_value(xml_search, attr); if (tmp) { *value = strdup(tmp); } } bail: free(xpath_string); free_xml(xml_search); return rc; } /* PRIVATE. Use the find_matching_attr_resources instead. */ static void find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* */ ** result, pe_resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, int depth) { int rc = pcmk_rc_ok; char *lookup_id = clone_strip(rsc->id); char *local_attr_id = NULL; /* visit the children */ for(GList *gIter = rsc->children; gIter; gIter = gIter->next) { find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data, rsc_id, attr_set, attr_set_type, attr_id, attr_name, cib, cmd, depth+1); /* do it only once for clones */ if(pe_clone == rsc->variant) { break; } } rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_rc_ok == rc)) { /* push the head */ *result = g_list_append(*result, rsc); } free(local_attr_id); free(lookup_id); } /* The result is a linearized pre-ordered tree of resources. */ static GList/**/ * find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, gboolean force) { int rc = pcmk_rc_ok; char *lookup_id = NULL; char *local_attr_id = NULL; GList * result = NULL; /* If --force is used, update only the requested resource (clone or primitive). * Otherwise, if the primitive has the attribute, use that. * Otherwise use the clone. */ if(force == TRUE) { return g_list_append(result, rsc); } if(rsc->parent && pe_clone == rsc->parent->variant) { int rc = pcmk_rc_ok; char *local_attr_id = NULL; rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); free(local_attr_id); if(rc != pcmk_rc_ok) { rsc = rsc->parent; if (!out->is_quiet(out)) { out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'", cmd, attr_name, rsc->id, rsc_id); } } return g_list_append(result, rsc); } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) { pe_resource_t *child = rsc->children->data; if(child->variant == pe_native) { lookup_id = clone_strip(child->id); /* Could be a cloned group! 
*/ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == pcmk_rc_ok) { rsc = child; if (!out->is_quiet(out)) { out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'", attr_name, lookup_id, cmd, rsc_id); } } free(local_attr_id); free(lookup_id); } return g_list_append(result, rsc); } /* If the resource is a group ==> children inherit the attribute if defined. */ find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set, attr_set_type, attr_id, attr_name, cib, cmd, 0); return result; } // \return Standard Pacemaker return code int cli_resource_update_attribute(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, int cib_options, pe_working_set_t *data_set, gboolean force) { int rc = pcmk_rc_ok; static bool need_init = TRUE; char *local_attr_id = NULL; char *local_attr_set = NULL; GList/**/ *resources = NULL; const char *common_attr_id = attr_id; if (attr_id == NULL && force == FALSE) { find_resource_attr (out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) { if (force == FALSE) { rc = find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_rc_ok && !out->is_quiet(out)) { out->err(out, "WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)", uber_parent(rsc)->id, attr_name, local_attr_id); out->err(out, " Delete '%s' first or use the force option to override", local_attr_id); } free(local_attr_id); if (rc == pcmk_rc_ok) { return ENOTUNIQ; } } resources = g_list_append(resources, rsc); } else { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "update", force); } /* If either attr_set or attr_id is specified, * one clearly intends to modify a single resource. * It is the last item on the resource list.*/ for(GList *gIter = (attr_set||attr_id) ? g_list_last(resources) : resources ; gIter; gIter = gIter->next) { char *lookup_id = NULL; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; local_attr_id = NULL; local_attr_set = NULL; rsc = (pe_resource_t*)gIter->data; attr_id = common_attr_id; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! 
*/ rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_rc_ok) { crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); attr_id = local_attr_id; } else if (rc != ENXIO) { free(lookup_id); free(local_attr_id); g_list_free(resources); return rc; } else { const char *tag = crm_element_name(rsc->xml); if (attr_set == NULL) { local_attr_set = crm_strdup_printf("%s-%s", lookup_id, attr_set_type); attr_set = local_attr_set; } if (attr_id == NULL) { local_attr_id = crm_strdup_printf("%s-%s", attr_set, attr_name); attr_id = local_attr_id; } xml_top = create_xml_node(NULL, tag); crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); } xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok && !out->is_quiet(out)) { out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value); } free_xml(xml_top); free(lookup_id); free(local_attr_id); free(local_attr_set); if(recursive && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { GListPtr lpc = NULL; if(need_init) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); need_init = FALSE; unpack_constraints(cib_constraints, data_set); pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating); } crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs); pe__set_resource_flags(rsc, pe_rsc_allocating); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; pe_resource_t *peer = cons->rsc_lh; crm_debug("Checking %s %d", cons->id, cons->score); if (cons->score > 0 && !pcmk_is_set(peer->flags, pe_rsc_allocating)) { /* Don't get into colocation loops */ crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id); cli_resource_update_attribute(out, peer, peer->id, NULL, attr_set_type, NULL, attr_name, attr_value, recursive, cib, cib_options, data_set, force); } } } } g_list_free(resources); return rc; } // \return Standard Pacemaker return code int cli_resource_delete_attribute(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, int cib_options, pe_working_set_t *data_set, gboolean force) { int rc = pcmk_rc_ok; GList/**/ *resources = NULL; if (attr_id == NULL && force == FALSE) { find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL); } if(pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "delete", force); } else { resources = g_list_append(resources, rsc); } for(GList *gIter = resources; gIter; gIter = gIter->next) { char *lookup_id = NULL; xmlNode *xml_obj = NULL; char *local_attr_id = NULL; rsc = (pe_resource_t*)gIter->data; lookup_id = clone_strip(rsc->id); rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == 
ENXIO) { free(lookup_id); rc = pcmk_rc_ok; continue; } else if (rc != pcmk_rc_ok) { free(lookup_id); g_list_free(resources); return rc; } if (attr_id == NULL) { attr_id = local_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); CRM_ASSERT(cib); rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok && !out->is_quiet(out)) { out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? attr_name : ""); } free(lookup_id); free_xml(xml_obj); free(local_attr_id); } g_list_free(resources); return rc; } // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, bool do_fail_resource, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { const char *router_node = host_uname; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; const char *rsc_class = NULL; const char *rsc_provider = NULL; const char *rsc_type = NULL; bool cib_only = false; pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { out->err(out, "Resource %s not found", rsc_id); return ENXIO; } else if (rsc->variant != pe_native) { out->err(out, "We can only process primitive resources, not %s", rsc_id); return EINVAL; } rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE); if ((rsc_class == NULL) || (rsc_type == NULL)) { out->err(out, "Resource %s does not have a class and type", rsc_id); return EINVAL; } if (host_uname == NULL) { out->err(out, "Please specify a node name"); return EINVAL; } else { pe_node_t *node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { out->err(out, "Node %s not found", host_uname); return pcmk_rc_node_unknown; } if (!(node->details->online)) { if (do_fail_resource) { out->err(out, "Node %s is not online", host_uname); return ENOTCONN; } else { cib_only = true; } } if (!cib_only && pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { out->err(out, "No cluster connection to Pacemaker Remote node %s detected", host_uname); return ENOTCONN; } router_node = node->details->uname; } } if (rsc->clone_name) { rsc_api_id = rsc->clone_name; rsc_long_id = rsc->id; } else { rsc_api_id = rsc->id; } if (do_fail_resource) { return pcmk_controld_api_fail(controld_api, host_uname, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type); } else { return pcmk_controld_api_refresh(controld_api, host_uname, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type, cib_only); } } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(pe_resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); return pcmk_is_set(rsc->flags, pe_rsc_unique)? 
strdup(name) : clone_strip(name); } // \return Standard Pacemaker return code static int clear_rsc_history(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { int rc = pcmk_rc_ok; /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. */ rc = send_lrm_rsc_op(out, controld_api, false, host_uname, rsc_id, data_set); if (rc != pcmk_rc_ok) { return rc; } crm_trace("Processing %d mainloop inputs", pcmk_controld_api_replies_expected(controld_api)); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", pcmk_controld_api_replies_expected(controld_api)); } return rc; } // \return Standard Pacemaker return code static int clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *node_name, const char *rsc_id, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_rc_ok; const char *failed_value = NULL; const char *failed_id = NULL; const char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { interval_ms_s = crm_strdup_printf("%u", crm_parse_interval_spec(interval_spec)); } for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL; xml_op = pcmk__xml_next(xml_op)) { failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources, failed_id, pe_find_renamed|pe_find_anon); if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) { continue; } } // Host name should always have been provided by this point failed_value = crm_element_value(xml_op, XML_ATTR_UNAME); if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) { continue; } // No operation specified means all operations match if (operation) { failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK); if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) { continue; } } - /* not available until glib 2.32 g_hash_table_add(rscs, (gpointer) failed_id); - */ - g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id); } g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { crm_debug("Erasing failures of %s on %s", failed_id, node_name); rc = clear_rsc_history(out, controld_api, node_name, failed_id, data_set); if (rc != pcmk_rc_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } // \return Standard Pacemaker return code static int clear_rsc_fail_attrs(pe_resource_t *rsc, const char *operation, const char *interval_spec, pe_node_t *node) { int rc = pcmk_rc_ok; int 
attr_options = pcmk__node_attr_none; char *rsc_name = rsc_fail_name(rsc); if (pe__is_guest_or_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } rc = pcmk__node_attr_request_clear(NULL, node->details->uname, rsc_name, operation, interval_spec, NULL, attr_options); free(rsc_name); return rc; } // \return Standard Pacemaker return code int cli_resource_delete(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *host_uname, pe_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set, gboolean force) { int rc = pcmk_rc_ok; pe_node_t *node = NULL; if (rsc == NULL) { return ENXIO; } else if (rsc->children) { GListPtr lpc = NULL; for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { pe_resource_t *child = (pe_resource_t *) lpc->data; rc = cli_resource_delete(out, controld_api, host_uname, child, operation, interval_spec, just_failures, data_set, force); if (rc != pcmk_rc_ok) { return rc; } } return pcmk_rc_ok; } else if (host_uname == NULL) { GListPtr lpc = NULL; GListPtr nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && force) { nodes = pcmk__copy_node_list(data_set->nodes, false); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { if(node->weight >= 0) { nodes = g_list_prepend(nodes, node); } } } else if(nodes == NULL) { nodes = g_hash_table_get_values(rsc->allowed_nodes); } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { node = (pe_node_t *) lpc->data; if (node->details->online) { rc = cli_resource_delete(out, controld_api, node->details->uname, rsc, operation, interval_spec, just_failures, data_set, force); } if (rc != pcmk_rc_ok) { g_list_free(nodes); return rc; } } g_list_free(nodes); return pcmk_rc_ok; } node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { out->err(out, "Unable to clean up %s because node %s not found", rsc->id, host_uname); return ENODEV; } if (!node->details->rsc_discovery_enabled) { out->err(out, "Unable to clean up %s because resource discovery disabled on %s", rsc->id, host_uname); return EOPNOTSUPP; } if (controld_api == NULL) { out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file", rsc->id, host_uname); return pcmk_rc_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up %s failures on %s: %s", rsc->id, host_uname, pcmk_rc_str(rc)); return rc; } if (just_failures) { rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation, interval_spec, data_set); } else { rc = clear_rsc_history(out, controld_api, host_uname, rsc->id, data_set); } if (rc != pcmk_rc_ok) { out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", rsc->id, host_uname, pcmk_strerror(rc)); } else { out->info(out, "Cleaned up %s on %s", rsc->id, host_uname); } return rc; } // \return Standard Pacemaker return code int cli_cleanup_all(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; const char *display_name = node_name? 
node_name : "all nodes"; if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", display_name); return rc; } if (node_name) { pe_node_t *node = pe_find_node(data_set->nodes, node_name); if (node == NULL) { out->err(out, "Unknown node: %s", node_name); return ENXIO; } if (pe__is_guest_or_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } } rc = pcmk__node_attr_request_clear(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up all failures on %s: %s", display_name, pcmk_rc_str(rc)); return rc; } if (node_name) { rc = clear_rsc_failures(out, controld_api, node_name, NULL, operation, interval_spec, data_set); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s", node_name, pcmk_strerror(rc)); return rc; } } else { for (GList *iter = data_set->nodes; iter; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL, operation, interval_spec, data_set); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s", pcmk_strerror(rc)); return rc; } } } out->info(out, "Cleaned up all resources on %s", display_name); return rc; } int cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc) { char *role_s = NULL; char *managed = NULL; pe_resource_t *parent = uber_parent(rsc); int rc = pcmk_rc_no_output; resource_checks_t *checks = NULL; find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); checks = cli_check_resource(rsc, role_s, managed); if (checks->flags != 0 || checks->lock_node != NULL) { rc = out->message(out, "resource-check-list", checks); } free(role_s); free(managed); free(checks); return rc; } // \return Standard Pacemaker return code int cli_resource_fail(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { crm_notice("Failing %s on %s", rsc_id, host_uname); return send_lrm_rsc_op(out, controld_api, true, host_uname, rsc_id, data_set); } static GHashTable * generate_resource_params(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; char *key = NULL; char *value = NULL; combined = crm_str_table_new(); params = pe_rsc_params(rsc, node, data_set); if (params != NULL) { g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { g_hash_table_insert(combined, strdup(key), strdup(value)); } } meta = crm_str_table_new(); get_meta_attributes(meta, rsc, node, data_set); if (meta != NULL) { g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } bool resource_is_running_on(pe_resource_t *rsc, const char *host) { bool found = TRUE; GListPtr hIter = NULL; GListPtr hosts = NULL; if(rsc == NULL) { return FALSE; } rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pe_node_t *node = 
(pe_node_t *) hIter->data; if(strcmp(host, node->details->uname) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } else if(strcmp(host, node->details->id) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if(host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = FALSE; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = FALSE; } done: g_list_free(hosts); return found; } /*! * \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { pe_resource_t *rsc = (pe_resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ if (rsc->variant == pe_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(pcmk__output_t *out, GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { out->info(out, "%s%s", tag, (const char *)item->data); } } /*! * \internal * \brief Upgrade XML to latest schema version and use it as working set input * * This also updates the working set timestamp to the current time. * * \param[in] data_set Working set instance to update * \param[in] xml XML to use as input * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for * data_set->now. * \todo This follows the example of other callers of cli_config_update() * and returns ENOKEY ("Required key not available") if that fails, * but perhaps pcmk_rc_schema_validation would be better in that case. */ int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return ENOKEY; } data_set->input = *xml; data_set->now = crm_time_new(NULL); return pcmk_rc_ok; } /*! * \internal * \brief Update a working set's XML input based on a CIB query * * \param[in] data_set Data set instance to initialize * \param[in] cib Connection to the CIB manager * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for * data_set->input and data_set->now. 
*/ static int update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set, cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc = pcmk_rc_ok; rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc); return rc; } rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_rc_ok) { out->err(out, "Could not upgrade the current CIB XML"); free_xml(cib_xml_copy); return rc; } return rc; } // \return Standard Pacemaker return code static int update_dataset(pcmk__output_t *out, cib_t *cib, pe_working_set_t * data_set, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc = pcmk_rc_ok; pe_reset_working_set(data_set); rc = update_working_set_from_cib(out, data_set, cib); if (rc != pcmk_rc_ok) { return rc; } if(simulate) { pid = pcmk__getpid_s(); shadow_cib = cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { out->err(out, "Could not create shadow cib: '%s'", pid); rc = ENXIO; goto cleanup; } rc = write_xml_file(data_set->input, shadow_file, FALSE); if (rc < 0) { out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc); goto cleanup; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc); goto cleanup; } pcmk__schedule_actions(data_set, data_set->input, NULL); run_simulation(data_set, shadow_cib, NULL, TRUE); rc = update_dataset(out, shadow_cib, data_set, FALSE); } else { cluster_status(data_set); } cleanup: /* Do not free data_set->input here, we need rsc->xml to be valid later on */ cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } static int max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc) { int delay = 0; int max_delay = 0; if(rsc && rsc->children) { GList *iter = NULL; for(iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = (pe_resource_t *)iter->data; delay = max_delay_for_resource(data_set, child); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id); max_delay = delay; } } } else if(rsc) { char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP); pe_action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set); const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT); max_delay = value? (int) crm_parse_ll(value, NULL) : -1; pe_free_action(stop); } return max_delay; } static int max_delay_in(pe_working_set_t * data_set, GList *resources) { int max_delay = 0; GList *item = NULL; for (item = resources; item != NULL; item = item->next) { int delay = 0; pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data); delay = max_delay_for_resource(data_set, rsc); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id); max_delay = delay; } } return 5 + (max_delay / 1000); } #define waiting_for_starts(d, r, h) ((d != NULL) || \ (resource_is_running_on((r), (h)) == FALSE)) /*! * \internal * \brief Restart a resource (on a particular host if requested). 
* * \param[in] rsc The resource to restart * \param[in] host The host to restart the resource on (or NULL for all) * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but a two-second * granularity is actually used; if 0, a timeout will be * calculated based on the resource timeout) * \param[in] cib Connection to the CIB manager * * \return Standard Pacemaker return code (exits on certain failures) */ int cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host, const char *move_lifetime, int timeout_ms, cib_t *cib, int cib_options, gboolean promoted_role_only, gboolean force) { int rc = pcmk_rc_ok; int lpc = 0; int before = 0; int step_timeout_s = 0; int sleep_interval = 2; int timeout = timeout_ms / 1000; bool stop_via_ban = FALSE; char *rsc_id = NULL; char *orig_target_role = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pe_working_set_t *data_set = NULL; if(resource_is_running_on(rsc, host) == FALSE) { const char *id = rsc->clone_name?rsc->clone_name:rsc->id; if(host) { out->err(out, "%s is not running on %s and so cannot be restarted", id, host); } else { out->err(out, "%s is not running anywhere and so cannot be restarted", id); } return ENXIO; } rsc_id = strdup(rsc->id); if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) { stop_via_ban = TRUE; } /* grab full cib determine originally active resources disable or ban poll cib and watch for affected resources to get stopped without --timeout, calculate the stop timeout for each step and wait for that if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down if everything stopped, re-enable or un-ban poll cib and watch for affected resources to get started without --timeout, calculate the start timeout for each step and wait for that if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up report success Optimizations: - use constraints to determine ordered list of affected resources - Allow a --no-deps option (aka. --force-restart) */ data_set = pe_new_working_set(); if (data_set == NULL) { crm_perror(LOG_ERR, "Could not allocate working set"); rc = ENOMEM; goto done; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); rc = update_dataset(out, cib, data_set, FALSE); if(rc != pcmk_rc_ok) { out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc); goto done; } restart_target_active = get_active_resources(host, data_set->resources); current_active = get_active_resources(host, data_set->resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ out->quiet = true; rc = cli_resource_ban(out, rsc_id, host, move_lifetime, NULL, cib, cib_options, promoted_role_only); } else { /* Stop the resource by setting target-role to Stopped. * Remember any existing target-role so we can restore it later * (though it only makes any difference if it's Slave). 
*/ char *lookup_id = clone_strip(rsc->id); find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role); free(lookup_id); rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, RSC_STOPPED, FALSE, cib, cib_options, data_set, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); if (current_active) { g_list_free_full(current_active, free); } if (restart_target_active) { g_list_free_full(restart_target_active, free); } goto done; } rc = update_dataset(out, cib, data_set, TRUE); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources would be stopped"); goto failure; } target_active = get_active_resources(host, data_set->resources); dump_list(target_active, "Target"); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (list_delta != NULL) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(out, cib, data_set, FALSE); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were stopped"); goto failure; } if (current_active) { g_list_free_full(current_active, free); } current_active = get_active_resources(host, data_set->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); if(before == g_list_length(list_delta)) { /* aborted during stop phase, print the contents of list_delta */ out->info(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } if (stop_via_ban) { rc = cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force); } else if (orig_target_role) { rc = cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, cib_options, data_set, force); free(orig_target_role); orig_target_role = NULL; } else { rc = cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, cib_options, data_set, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc); goto done; } if (target_active) { g_list_free_full(target_active, free); } target_active = restart_target_active; list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (waiting_for_starts(list_delta, rsc, host)) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval; } /* We probably don't need 
the entire step timeout */ for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(out, cib, data_set, FALSE); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were started"); goto failure; } if (current_active) { g_list_free_full(current_active, free); } /* It's OK if dependent resources moved to a different node, * so we check active resources on all nodes. */ current_active = get_active_resources(NULL, data_set->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } if(before == g_list_length(list_delta)) { /* aborted during start phase, print the contents of list_delta */ out->info(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } rc = pcmk_rc_ok; goto done; failure: if (stop_via_ban) { cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force); } else if (orig_target_role) { cli_resource_update_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, orig_target_role, FALSE, cib, cib_options, data_set, force); free(orig_target_role); } else { cli_resource_delete_attribute(out, rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, cib_options, data_set, force); } done: if (list_delta) { g_list_free(list_delta); } if (current_active) { g_list_free_full(current_active, free); } if (target_active && (target_active != restart_target_active)) { g_list_free_full(target_active, free); } if (restart_target_active) { g_list_free_full(restart_target_active, free); } free(rsc_id); pe_free_working_set(data_set); return rc; } static inline bool action_is_pending(pe_action_t *action) { if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo) || !pcmk_is_set(action->flags, pe_action_runnable) || pcmk__str_eq("notify", action->task, pcmk__str_casei)) { return false; } return true; } /*! * \internal * \brief Return TRUE if any actions in a list are pending * * \param[in] actions List of actions to check * * \return TRUE if any actions in the list are pending, FALSE otherwise */ static bool actions_are_pending(GListPtr actions) { GListPtr action; for (action = actions; action != NULL; action = action->next) { pe_action_t *a = (pe_action_t *)action->data; if (action_is_pending(a)) { crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags); return TRUE; } } return FALSE; } static void print_pending_actions(pcmk__output_t *out, GListPtr actions) { GListPtr action; out->info(out, "Pending actions:"); for (action = actions; action != NULL; action = action->next) { pe_action_t *a = (pe_action_t *) action->data; if (!action_is_pending(a)) { continue; } if (a->node) { out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, a->node->details->uname); } else { out->info(out, "\tAction %d: %s", a->id, a->uuid); } } } /* For --wait, timeout (in seconds) to use if caller doesn't specify one */ #define WAIT_DEFAULT_TIMEOUT_S (60 * 60) /* For --wait, how long to sleep between cluster state checks */ #define WAIT_SLEEP_S (2) /*! * \internal * \brief Wait until all pending cluster actions are complete * * This waits until either the CIB's transition graph is idle or a timeout is * reached. 
* * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but one-second granularity * is actually used; if 0, a default will be used) * \param[in] cib Connection to the CIB manager * * \return Standard Pacemaker return code */ int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib) { pe_working_set_t *data_set = NULL; int rc = pcmk_rc_ok; int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; time_t expire_time = time(NULL) + timeout_s; time_t time_diff; bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet data_set = pe_new_working_set(); if (data_set == NULL) { return ENOMEM; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); do { /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff > 0) { crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff); } else { print_pending_actions(out, data_set->actions); pe_free_working_set(data_set); return ETIME; } if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */ sleep(WAIT_SLEEP_S); } /* Get latest transition graph */ pe_reset_working_set(data_set); rc = update_working_set_from_cib(out, data_set, cib); if (rc != pcmk_rc_ok) { pe_free_working_set(data_set); return rc; } pcmk__schedule_actions(data_set, data_set->input, NULL); if (!printed_version_warning) { /* If the DC has a different version than the local node, the two * could come to different conclusions about what actions need to be * done. Warn the user in this case. * * @TODO A possible long-term solution would be to reimplement the * wait as a new controller operation that would be forwarded to the * DC. However, that would have potential problems of its own. */ const char *dc_version = g_hash_table_lookup(data_set->config_hash, "dc-version"); if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) { out->info(out, "warning: wait option may not work properly in " "mixed-version cluster"); printed_version_warning = TRUE; } } } while (actions_are_pending(data_set->actions)); pe_free_working_set(data_set); return rc; } crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, const char *rsc_class, const char *rsc_prov, const char *rsc_type, const char *action, GHashTable *params, GHashTable *override_hash, int timeout_ms, int resource_verbose, gboolean force) { GHashTable *params_copy = NULL; crm_exit_t exit_code = CRM_EX_OK; svc_action_t *op = NULL; if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { out->err(out, "Sorry, the %s option doesn't support %s resources yet", action, rsc_class); crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); } /* If no timeout was provided, grab the default. */ if (timeout_ms == 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } /* add meta_timeout env needed by some resource agents */ g_hash_table_insert(params, strdup("CRM_meta_timeout"), crm_strdup_printf("%d", timeout_ms)); /* add crm_feature_set env needed by some resource agents */ g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); /* resources_action_create frees the params hash table it's passed, but we * may need to reuse it in a second call to resources_action_create. Thus * we'll make a copy here so that gets freed and the original remains for * reuse. 
*/ params_copy = crm_str_table_dup(params); op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0, timeout_ms, params_copy, 0); if (op == NULL) { /* Re-run with stderr enabled so we can display a sane error message */ crm_enable_stderr(TRUE); params_copy = crm_str_table_dup(params); op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0, timeout_ms, params_copy, 0); /* Callers of cli_resource_execute expect that the params hash table will * be freed. That function uses this one, so for that reason and for * making the two act the same, we should free the hash table here too. */ g_hash_table_destroy(params); /* We know op will be NULL, but this makes static analysis happy */ services_action_free(op); crm_exit(CRM_EX_DATAERR); return exit_code; // Never reached, but helps static analysis } setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1); if(resource_verbose > 1) { setenv("OCF_TRACE_RA", "1", 1); } /* A resource agent using the standard ocf-shellfuncs library will not print * messages to stderr if it doesn't have a controlling terminal (e.g. if * crm_resource is called via script or ssh). This forces it to do so. */ setenv("OCF_TRACE_FILE", "/dev/stderr", 0); if (override_hash) { GHashTableIter iter; char *name = NULL; char *value = NULL; g_hash_table_iter_init(&iter, override_hash); while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { out->info(out, "Overriding the cluster configuration for '%s' with '%s' = '%s'", rsc_name, name, value); g_hash_table_replace(op->params, strdup(name), strdup(value)); } } if (services_action_sync(op)) { exit_code = op->rc; if (op->status == PCMK_LRM_OP_DONE) { out->info(out, "Operation %s for %s (%s:%s:%s) returned: '%s' (%d)", action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, services_ocf_exitcode_str(op->rc), op->rc); } else { out->info(out, "Operation %s for %s (%s:%s:%s) failed: '%s' (%d)", action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type, services_lrm_status_str(op->status), op->status); } /* hide output for validate-all if not in verbose */ if (resource_verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) goto done; if (op->stdout_data || op->stderr_data) { out->subprocess_output(out, op->rc, op->stdout_data, op->stderr_data); } } else { exit_code = op->rc == 0 ? CRM_EX_ERROR : op->rc; } done: services_action_free(op); /* See comment above about why we free params here. 
*/ g_hash_table_destroy(params); return exit_code; } crm_exit_t cli_resource_execute(pcmk__output_t *out, pe_resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, int timeout_ms, cib_t * cib, pe_working_set_t *data_set, int resource_verbose, gboolean force) { crm_exit_t exit_code = CRM_EX_OK; const char *rid = NULL; const char *rtype = NULL; const char *rprov = NULL; const char *rclass = NULL; const char *action = NULL; GHashTable *params = NULL; if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) { action = "validate-all"; } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { action = "monitor"; } else if (pcmk__str_eq(rsc_action, "force-stop", pcmk__str_casei)) { action = rsc_action+6; } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote", "force-promote", NULL)) { action = rsc_action+6; if(pe_rsc_is_clone(rsc)) { GListPtr rscs = cli_resource_search(out, rsc, requested_name, data_set); if(rscs != NULL && force == FALSE) { out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", action, rsc->id); out->err(out, "Try setting target-role=Stopped first or specifying " "the force option"); return CRM_EX_UNSAFE; } } } else { action = rsc_action; } if(pe_rsc_is_clone(rsc)) { /* Grab the first child resource in the hope it's not a group */ rsc = rsc->children->data; } if(rsc->variant == pe_group) { out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action); return CRM_EX_UNIMPLEMENT_FEATURE; } rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); params = generate_resource_params(rsc, NULL /* @TODO use local node */, data_set); if (timeout_ms == 0) { timeout_ms = pe_get_configured_timeout(rsc, action, data_set); } rid = pe_rsc_is_anon_clone(rsc->parent)? 
requested_name : rsc->id; exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action, params, override_hash, timeout_ms, resource_verbose, force); return exit_code; } // \return Standard Pacemaker return code int cli_resource_move(pcmk__output_t *out, pe_resource_t *rsc, const char *rsc_id, const char *host_name, const char *move_lifetime, cib_t *cib, int cib_options, pe_working_set_t *data_set, gboolean promoted_role_only, gboolean force) { int rc = pcmk_rc_ok; unsigned int count = 0; pe_node_t *current = NULL; pe_node_t *dest = pe_find_node(data_set->nodes, host_name); bool cur_is_dest = FALSE; if (dest == NULL) { return pcmk_rc_node_unknown; } if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pe_resource_t *p = uber_parent(rsc); if (pcmk_is_set(p->flags, pe_rsc_promotable)) { out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); rsc_id = p->id; rsc = p; } else { out->info(out, "Ignoring master option: %s is not promotable", rsc_id); promoted_role_only = FALSE; } } current = pe__find_active_requires(rsc, &count); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { GListPtr iter = NULL; unsigned int master_count = 0; pe_node_t *master_node = NULL; for(iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = (pe_resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { rsc = child; master_node = pe__current_node(child); master_count++; } } if (promoted_role_only || master_count) { count = master_count; current = master_node; } } if (count > 1) { if (pe_rsc_is_clone(rsc)) { current = NULL; } else { return pcmk_rc_multiple; } } if (current && (current->details == dest->details)) { cur_is_dest = TRUE; if (force) { crm_info("%s is already %s on %s, reinforcing placement with location constraint.", rsc_id, promoted_role_only?"promoted":"active", dest->details->uname); } else { return pcmk_rc_already; } } /* Clear any previous prefer constraints across all nodes. */ cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, FALSE, force); /* Clear any previous ban constraints on 'dest'. */ cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib, cib_options, TRUE, force); /* Record an explicit preference for 'dest' */ rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime, cib, cib_options, promoted_role_only); crm_trace("%s%s now prefers node %s%s", rsc->id, promoted_role_only?" (master)":"", dest->details->uname, force?"(forced)":""); /* only ban the previous location if current location != destination location. * it is possible to use -M to enforce a location without regard of where the * resource is currently located */ if(force && (cur_is_dest == FALSE)) { /* Ban the original location if possible */ if(current) { (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime, NULL, cib, cib_options, promoted_role_only); } else if(count > 1) { out->info(out, "Resource '%s' is currently %s in %d locations. " "One may now move to %s", rsc_id, (promoted_role_only? "promoted" : "active"), count, dest->details->uname); out->info(out, "To prevent '%s' from being %s at a specific location, " "specify a node.", rsc_id, (promoted_role_only? 
"promoted" : "active")); } else { crm_trace("Not banning %s from its current location: not active", rsc_id); } } return rc; } diff --git a/tools/pcmk_simtimes.in b/tools/pcmk_simtimes.in index 0a9f71405f..f082922f1e 100644 --- a/tools/pcmk_simtimes.in +++ b/tools/pcmk_simtimes.in @@ -1,151 +1,148 @@ #!@PYTHON@ """ Timing comparisons for crm_simulate profiling output """ -# Pacemaker targets compatibility with Python 2.7 and 3.2+ -from __future__ import print_function, unicode_literals, absolute_import, division - -__copyright__ = "Copyright 2019 the Pacemaker project contributors" +__copyright__ = "Copyright 2019-2020 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import re import sys import errno import argparse DESC = """Compare timings from crm_simulate profiling output""" # These values must be kept in sync with include/crm/crm.h class CrmExit(object): OK = 0 BEFORE_HELP = """Output of "crm_simulate --profile cts/scheduler --repeat " from earlier Pacemaker build""" # line like: * Testing cts/scheduler/1360.xml ... 0.07 secs PATTERN = r"""^\s*\*\s+Testing\s+.*/([^/]+)\.xml\s+\.+\s+([.0-9]+)\s+secs\s*$""" def parse_args(argv=sys.argv): """ Parse command-line arguments """ parser = argparse.ArgumentParser(description=DESC) parser.add_argument('-V', '--verbose', action='count', help='Increase verbosity') parser.add_argument('-p', '--threshold-percent', type=float, default=0, help="Don't show tests with less than this percentage difference in times") parser.add_argument('-s', '--threshold-seconds', type=float, default=0, help="Don't show tests with less than this seconds difference in times") parser.add_argument('-S', '--sort', choices=['test', 'before', 'after', 'diff', 'percent'], default='test', help="Sort results by this column") parser.add_argument('-r', '--reverse', action='store_true', help="Sort results in descending order") parser.add_argument('before_file', metavar='BEFORE', type=argparse.FileType('r'), help=BEFORE_HELP) parser.add_argument('after_file', metavar='AFTER', type=argparse.FileType('r'), help='Output of same command from later Pacemaker build') return parser.parse_args(argv[1:]) def extract_times(infile): """ Extract test names and times into hash table from file """ result = {} for line in infile: match = re.search(PATTERN, line) if match is not None: result[match.group(1)] = match.group(2) return result def compare_test(test, before, after, args): """ Compare one test's timings """ try: before_time = float(before[test]) except KeyError: if args.verbose > 0: print("No previous test " + test + " to compare") return None after_time = float(after[test]) time_diff = after_time - before_time time_diff_percent = (time_diff / before_time) * 100 if ((abs(time_diff) >= args.threshold_seconds) and (abs(time_diff_percent) >= args.threshold_percent)): return { 'test': test, 'before': before_time, 'after': after_time, 'diff': time_diff, 'percent': time_diff_percent } return None def sort_diff(result): """ Sort two test results by time difference """ global sort_field return result[sort_field] def print_results(results, sort_reverse): """ Output the comparison results """ if results == []: return # Sort and print test differences results.sort(reverse=sort_reverse, key=sort_diff) for result in results: print("%-40s %6.2fs vs %6.2fs (%+.2fs = %+6.2f%%)" % (result['test'], result['before'], result['after'], result['diff'], result['percent'])) # Print average differences diff_total = 
sum(d['diff'] for d in results) percent_total = sum(d['percent'] for d in results) nresults = len(results) print("\nAverages: %+.2fs %+6.2f%%" % ((diff_total / nresults), (percent_total / nresults))) if __name__ == "__main__": global sort_field try: args = parse_args() before = extract_times(args.before_file) after = extract_times(args.after_file) sort_field = args.sort # Build a list of test differences results = [] for test in after.keys(): result = compare_test(test, before, after, args) if result is not None: results = results + [ result ] print_results(results, sort_reverse=args.reverse) except KeyboardInterrupt: pass except IOError as e: if e.errno != errno.EPIPE: raise sys.exit(CrmExit.OK) # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/xml/regression.sh b/xml/regression.sh index 06b3940979..7269779c47 100755 --- a/xml/regression.sh +++ b/xml/regression.sh @@ -1,786 +1,782 @@ #!/bin/sh -# Copyright 2018-2019 the Pacemaker project contributors +# +# Copyright 2018-2020 the Pacemaker project contributors # # The version control history for this file may have further details. # -# SPDX-License-Identifier: GPL-2.0-or-later +# This source code is licensed under the GNU General Public License version 2 +# or later (GPLv2+) WITHOUT ANY WARRANTY. set -eu test -d assets && test -d test-2 \ || { echo 'Run me from source-tree-like location'; exit 1; } # $1=reference (can be '-' for stdin), $2=investigated # alt.: wdiff, colordiff, ... DIFF=${DIFF:-diff} DIFFOPTS=${DIFFOPTS--u} DIFFPAGER=${DIFFPAGER:-less -LRX} # $1=schema, $2=validated # alt.: jing -i RNGVALIDATOR=${RNGVALIDATOR:-xmllint --noout --relaxng} # $1=stylesheet, $2=source # alt.: Xalan, saxon, sabcmd/Sablotron (note: only validates reliably with -B) _xalan_wrapper() { { ${_XSLTPROCESSOR} "$2" "$1" 2>&1 >&3 \ | sed -e '/^Source tree node.*$/d' \ -e 's|^XSLT message: \(.*\) (Occurred.*)|\1|'; } 3>&- 3>&1 >&2 } # Sablotron doesn't translate '-' file specification to stdin # and limits the length of the output message _sabcmd_wrapper() { _sabw_sheet=${1:?} _sabw_source=${2:?} test "${_sabw_sheet}" != - || _sabw_sheet=/dev/stdin test "${_sabw_source}" != - || _sabw_source=/dev/stdin { ${_XSLTPROCESSOR} "${_sabw_sheet}" "${_sabw_source}" 2>&1 >&3 \ | sed -e '/^Warning \[code:89\]/d' \ -e 's|^ xsl:message (\(.*\))$|\1|'; } 3>&- 3>&1 >&2 } # filtered out message: https://bugzilla.redhat.com/show_bug.cgi?id=1577367 _saxon_wrapper() { { ${_XSLTPROCESSOR} "-xsl:$1" "-s:$2" -versionmsg:off 2>&1 >&3 \ | sed -e '/^Cannot find CatalogManager.properties$/d'; } 3>&- 3>&1 >&2 } XSLTPROCESSOR=${XSLTPROCESSOR:-xsltproc --nonet} _XSLTPROCESSOR=${XSLTPROCESSOR} case "${XSLTPROCESSOR}" in [Xx]alan*|*/[Xx]alan*) XSLTPROCESSOR=_xalan_wrapper;; sabcmd*|*/sabcmd*) XSLTPROCESSOR=_sabcmd_wrapper;; saxon*|*/saxon*) XSLTPROCESSOR=_saxon_wrapper;; esac HTTPPORT=${HTTPPORT:-8000} # Python's default WEBBROWSER=${WEBBROWSER:-firefox} tests= # test* names (should go first) here will become preselected default # # commons # emit_result() { _er_howmany=${1:?} # how many errors (0/anything else incl. 
strings) _er_subject=${2:?} _er_prefix=${3-} test -z "${_er_prefix}" || _er_prefix="${_er_prefix}: " if test "${_er_howmany}" = 0; then printf "%s%s finished OK\n" "${_er_prefix}" "${_er_subject}" else printf "%s%s encountered ${_er_howmany} errors\n" \ "${_er_prefix}" "${_er_subject}" fi } emit_error() { _ee_msg=${1:?} printf "%s\n" "${_ee_msg}" >&2 } # returns 1 + floor of base 2 logaritm for _lo0r_i in 1...255, # or 0 for _lo0r_i = 0 log2_or_0_return() { _lo0r_i=${1:?} return $(((!(_lo0r_i >> 1) && _lo0r_i) * 1 \ + (!(_lo0r_i >> 2) && _lo0r_i & (1 << 1)) * 2 \ + (!(_lo0r_i >> 3) && _lo0r_i & (1 << 2)) * 3 \ + (!(_lo0r_i >> 4) && _lo0r_i & (1 << 3)) * 4 \ + (!(_lo0r_i >> 5) && _lo0r_i & (1 << 4)) * 5 \ + (!(_lo0r_i >> 6) && _lo0r_i & (1 << 5)) * 6 \ + (!(_lo0r_i >> 7) && _lo0r_i & (1 << 6)) * 7 \ + !!(_lo0r_i >> 7) * 7 )) } # rough addition of two base 2 logarithms log2_or_0_add() { _lo0a_op1=${1:?} _lo0a_op2=${2:?} if test ${_lo0a_op1} -gt ${_lo0a_op2}; then return ${_lo0a_op1} elif test ${_lo0a_op2} -gt ${_lo0a_op1}; then return ${_lo0a_op2} elif test ${_lo0a_op1} -gt 0; then return $((_lo0a_op1 + 1)) else return ${_lo0a_op1} fi } # # test phases # # stdin: input file per line test_browser() { _tb_cleanref=0 _tb_serverpid= while test $# -gt 0; do case "$1" in -r) _tb_cleanref=1;; esac shift done if ! read _tb_first; then return 1 fi cat >/dev/null 2>/dev/null # read out the rest test -f assets/diffview.js \ || curl -SsLo assets/diffview.js \ 'https://raw.githubusercontent.com/prettydiff/prettydiff/2.2.8/lib/diffview.js' { which python3 >/dev/null 2>/dev/null \ && { python3 -m http.server "${HTTPPORT}" -b 127.0.0.1 \ || emit_error "Python3 HTTP server fail"; return; } \ - || which python2 >/dev/null 2>/dev/null \ - && { printf '%s %s\n' \ - 'Python 2 backed HTTP server cannot listen at particular' \ - 'address, discretion regarding firewall rules recommended!' - python2 -m SimpleHTTPServer "${HTTPPORT}" \ - || emit_error 'Python2 HTTP server fail'; return; } \ - || emit_error 'Cannot run Python based HTTP server' ; } & + || emit_error 'Cannot run Python-based HTTP server' ; } & _tb_serverpid=$! ${WEBBROWSER} "http://localhost:${HTTPPORT}/${_tb_first}" & printf "When finished, just press Ctrl+C or kill %d, please\n" \ "${_tb_serverpid}" wait test "${_tb_cleanref}" -eq 0 || rm -f assets/diffview.js } # -r ... whether to remove referential files as well # stdin: input file per line test_cleaner() { _tc_cleanref=0 while test $# -gt 0; do case "$1" in -r) _tc_cleanref=1;; esac shift done while read _tc_origin; do _tc_origin=${_tc_origin%.*} rm -f "${_tc_origin}.up" "${_tc_origin}.up.err" rm -f "$(dirname "${_tc_origin}")/.$(basename "${_tc_origin}").up" test ${_tc_cleanref} -eq 0 \ || rm -f "${_tc_origin}.ref" "${_tc_origin}.ref.err" done } # -a= ... action modifier to derive template name from (if any; enter/leave) # -o= ... which conventional version to deem as the transform origin test_selfcheck() { _tsc_cleanref=0 _tsc_ret=0 _tsc_action= _tsc_template= _tsc_validator= while test $# -gt 0; do case "$1" in -r) _tsc_cleanref=1;; -a=*) _tsc_action="${1#-a=}";; -o=*) _tsc_template="${1#-o=}";; esac shift done _tsc_validator="${_tsc_template:?}" _tsc_validator="cibtr-${_tsc_validator%%.*}.rng" _tsc_action=${_tsc_action:+-${_tsc_action}} _tsc_template="upgrade-${_tsc_template}${_tsc_action}.xsl" # alt. https://relaxng.org/relaxng.rng _tsc_rng_relaxng=https://raw.githubusercontent.com/relaxng/relaxng.org/master/relaxng.rng # alt. 
https://github.com/ndw/xslt-relax-ng/blob/master/1.0/xslt10.rnc _tsc_rng_xslt=https://raw.githubusercontent.com/relaxng/jing-trang/master/eg/xslt.rng case "${RNGVALIDATOR}" in *xmllint*) test -f "assets/$(basename "${_tsc_rng_relaxng}")" \ || curl -SsLo "assets/$(basename "${_tsc_rng_relaxng}")" \ "${_tsc_rng_relaxng}" test -f "assets/$(basename "${_tsc_rng_xslt}")" \ || curl -SsLo "assets/$(basename "${_tsc_rng_xslt}")" \ "${_tsc_rng_xslt}" test -f assets/xmlcatalog || >assets/xmlcatalog cat <<-EOF EOF RNGVALIDATOR=\ "eval env \"XML_CATALOG_FILES=/etc/xml/catalog $(pwd)/assets/xmlcatalog\" ${RNGVALIDATOR}" # not needed #_tsc_rng_relaxng="assets/$(basename "${_tsc_rng_relaxng}")" #_tsc_rng_xslt="assets/$(basename "${_tsc_rng_xslt}")" ;; esac # check schema (sub-grammar) for custom transformation mapping alone; if test -z "${_tsc_action}" \ && ! ${RNGVALIDATOR} "${_tsc_rng_relaxng}" "${_tsc_validator}"; then _tsc_ret=$((_tsc_ret + 1)) fi # check the overall XSLT per the main grammar + said sub-grammar; test -f "${_tsc_validator}" && _tsc_validator="xslt_${_tsc_validator}" \ || _tsc_validator="${_tsc_rng_xslt}" if ! ${RNGVALIDATOR} "${_tsc_validator}" "${_tsc_template}"; then _tsc_ret=$((_tsc_ret + 1)) fi test "${_tsc_cleanref}" -eq 0 \ || rm -f assets/relaxng.rng assets/xslt.rng assets/xmlcatalog log2_or_0_return ${_tsc_ret} } test_explanation() { _tsc_template= while test $# -gt 0; do case "$1" in -o=*) _tsc_template="upgrade-${1#-o=}.xsl";; esac shift done ${XSLTPROCESSOR} upgrade-detail.xsl "${_tsc_template}" } # stdout: filename of the transformed file test_runner_upgrade() { _tru_template=${1:?} _tru_source=${2:?} # filename _tru_mode=${3:?} # extra modes wrt. "referential" outcome, see below _tru_ref="${_tru_source%.*}.ref" { test "$((_tru_mode & (1 << 0)))" -ne 0 \ || test -f "${_tru_ref}.err"; } \ && _tru_ref_err="${_tru_ref}.err" || _tru_ref_err=/dev/null _tru_target="${_tru_source%.*}.up" _tru_target_err="${_tru_target}.err" if test $((_tru_mode & (1 << 2))) -eq 0; then ${XSLTPROCESSOR} "${_tru_template}" "${_tru_source}" \ > "${_tru_target}" 2> "${_tru_target_err}" \ || { _tru_ref=$?; echo "${_tru_target_err}" return ${_tru_ref}; } else # when -B (deblanked outcomes handling) requested, we: # - drop blanks from the source XML # (effectively emulating pacemaker handling) # - re-drop blanks from the XSLT outcome, # which is compared with referential outcome # processed with even greedier custom deblanking # (extraneous inter-element whitespace like blank # lines will not get removed otherwise, see lower) xmllint --noblanks "${_tru_source}" \ | ${XSLTPROCESSOR} "${_tru_template}" - \ > "${_tru_target}" 2> "${_tru_target_err}" \ || { _tru_ref=$?; echo "${_tru_target_err}" return ${_tru_ref}; } # reusing variable no longer needed _tru_template="$(dirname "${_tru_target}")" _tru_template="${_tru_template}/.$(basename "${_tru_target}")" mv "${_tru_target}" "${_tru_template}" ${XSLTPROCESSOR} - "${_tru_template}" > "${_tru_target}" <<-EOF EOF fi # only respond with the flags except for "-B", i.e., when both: # - _tru_mode non-zero # - "-B" in _tru_mode is zero (hence non-zero when flipped with XOR) if test "$((_tru_mode * ((_tru_mode ^ (1 << 2)) & (1 << 2))))" -ne 0; then if test $((_tru_mode & (1 << 0))) -ne 0; then cp -a "${_tru_target}" "${_tru_ref}" cp -a "${_tru_target_err}" "${_tru_ref_err}" fi if test $((_tru_mode & (1 << 1))) -ne 0; then { "${DIFF}" ${DIFFOPTS} "${_tru_source}" "${_tru_ref}" \ && printf '\n(files match)\n'; } | ${DIFFPAGER} >&2 if test $? 
-ne 0; then printf "\npager failure\n" >&2 return 1 fi printf '\nIs comparison OK? ' >&2 if read _tru_answer &2; return 1;; esac else return 1 fi fi elif test -f "${_tru_ref}" && test -e "${_tru_ref_err}"; then { test "$((_tru_mode & (1 << 2)))" -eq 0 && cat "${_tru_ref}" \ || ${XSLTPROCESSOR} - "${_tru_ref}" <<-EOF EOF } \ | "${DIFF}" ${DIFFOPTS} - "${_tru_target}" >&2 \ && "${DIFF}" ${DIFFOPTS} "${_tru_ref_err}" \ "${_tru_target_err}" >&2 if test $? -ne 0; then emit_error "Outputs differ from referential ones" echo "/dev/null" return 1 fi else emit_error "Referential file(s) missing: ${_tru_ref}" echo "/dev/null" return 1 fi echo "${_tru_target}" } test_runner_validate() { _trv_schema=${1:?} _trv_target=${2:?} # filename if ! ${RNGVALIDATOR} "${_trv_schema}" "${_trv_target}" \ 2>/dev/null; then ${RNGVALIDATOR} "${_trv_schema}" "${_trv_target}" fi } # -a= ... action modifier completing template name (e.g. 2.10-(enter|leave)) # -o= ... which conventional version to deem as the transform origin # -t= ... which conventional version to deem as the transform target # -B # -D # -G ... see usage # stdin: input file per line test_runner() { _tr_mode=0 _tr_ret=0 _tr_action= _tr_schema_o= _tr_schema_t= _tr_target= _tr_template= while test $# -gt 0; do case "$1" in -a=*) _tr_action="${1#-a=}";; -o=*) _tr_template="${1#-o=}" _tr_schema_o="pacemaker-${1#-o=}.rng";; -t=*) _tr_schema_t="pacemaker-${1#-t=}.rng";; -G) _tr_mode=$((_tr_mode | (1 << 0)));; -D) _tr_mode=$((_tr_mode | (1 << 1)));; -B) _tr_mode=$((_tr_mode | (1 << 2)));; esac shift done _tr_template="upgrade-${_tr_action:-${_tr_template:?}}.xsl" if ! test -f "${_tr_schema_o:?}" || ! test -f "${_tr_schema_t:?}"; then emit_error "Origin and/or target schema missing, rerun make" return 1 fi while read _tr_origin; do printf '%-60s' "${_tr_origin}... " # pre-validate if ! test_runner_validate "${_tr_schema_o}" "${_tr_origin}"; then _tr_ret=$((_tr_ret + 1)); echo "E:pre-validate"; continue fi # upgrade if ! _tr_target=$(test_runner_upgrade "${_tr_template}" \ "${_tr_origin}" "${_tr_mode}"); then _tr_ret=$((_tr_ret + 1)); test -n "${_tr_target}" || break echo "E:upgrade" test -s "${_tr_target}" \ && { echo ---; cat "${_tr_target}" || :; echo ---; } continue fi # post-validate if ! test_runner_validate "${_tr_schema_t}" "${_tr_target}"; then _tr_ret=$((_tr_ret + 1)); echo "E:post-validate"; continue fi echo "OK" done log2_or_0_return ${_tr_ret} } # # particular test variations # -C # -S # -X # -W ... 
see usage # stdin: granular test specification(s) if any # test2to3() { _t23_cleanopt= _t23_pattern= while read _t23_spec; do _t23_spec=${_t23_spec%.xml} _t23_spec=${_t23_spec%\*} _t23_pattern="${_t23_pattern} -name ${_t23_spec}*.xml -o" done test -z "${_t23_pattern}" || _t23_pattern="( ${_t23_pattern%-o} )" case " $* " in *\ -r\ *) _t23_cleanopt=-r; esac find test-2 -name test-2 -o -type d -prune \ -o -name '*.xml' ${_t23_pattern} -print | env LC_ALL=C sort \ | { case " $* " in *\ -C\ *) test_cleaner;; *\ -S\ *) test_selfcheck -o=2.10 ${_t23_cleanopt};; *\ -X\ *) test_explanation -o=2.10;; *\ -W\ *) test_browser ${_t23_cleanopt};; *) test_runner -o=2.10 -t=3.0 "$@" || return $?;; esac; } } tests="${tests} test2to3" test2to3enter() { _t23e_cleanopt= _t23e_pattern= while read _t23e_spec; do _t23e_spec=${_t23e_spec%.xml} _t23e_spec=${_t23e_spec%\*} _t23e_pattern="${_t23e_pattern} -name ${_t23e_spec}*.xml -o" done test -z "${_t23e_pattern}" || _t23e_pattern="( ${_t23e_pattern%-o} )" case " $* " in *\ -r\ *) _t23e_cleanopt=-r; esac find test-2-enter -name test-2-enter -o -type d -prune \ -o -name '*.xml' ${_t23e_pattern} -print | env LC_ALL=C sort \ | { case " $* " in *\ -C\ *) test_cleaner;; *\ -S\ *) test_selfcheck -a=enter -o=2.10 ${_t23e_cleanopt};; *\ -W\ *) emit_result "not implemented" "option -W";; *\ -X\ *) emit_result "not implemented" "option -X";; *) test_runner -a=2.10-enter -o=2.10 -t=2.10 "$@" || return $?;; esac; } } tests="${tests} test2to3enter" test2to3leave() { _t23l_cleanopt= _t23l_pattern= while read _t23l_spec; do _t23l_spec=${_t23l_spec%.xml} _t23l_spec=${_t23l_spec%\*} _t23l_pattern="${_t23l_pattern} -name ${_t23l_spec}*.xml -o" done test -z "${_t23l_pattern}" || _t23l_pattern="( ${_t23l_pattern%-o} )" case " $* " in *\ -r\ *) _t23l_cleanopt=-r; esac find test-2-leave -name test-2-leave -o -type d -prune \ -o -name '*.xml' ${_t23l_pattern} -print | env LC_ALL=C sort \ | { case " $* " in *\ -C\ *) test_cleaner;; *\ -S\ *) test_selfcheck -a=leave -o=2.10 ${_t23l_cleanopt};; *\ -W\ *) emit_result "not implemented" "option -W";; *\ -X\ *) emit_result "not implemented" "option -X";; *) test_runner -a=2.10-leave -o=3.0 -t=3.0 "$@" || return $?;; esac; } } tests="${tests} test2to3leave" test2to3roundtrip() { _t23rt_cleanopt= _t23rt_pattern= while read _t23tr_spec; do _t23rt_spec=${_t23rt_spec%.xml} _t23rt_spec=${_t23rt_spec%\*} _t23rt_pattern="${_t23rt_pattern} -name ${_t23rt_spec}*.xml -o" done test -z "${_t23rt_pattern}" || _t23rt_pattern="( ${_t23rt_pattern%-o} )" case " $* " in *\ -r\ *) _t23rt_cleanopt=-r; esac find test-2-roundtrip -name test-2-roundtrip -o -type d -prune \ -o -name '*.xml' ${_t23rt_pattern} -print | env LC_ALL=C sort \ | { case " $* " in *\ -C\ *) test_cleaner;; *\ -S\ *) test_selfcheck -a=roundtrip -o=2.10 ${_t23rt_cleanopt};; *\ -W\ *) test_browser ${_t23rt_cleanopt};; *\ -X\ *) emit_result "not implemented" "option -X";; *) test_runner -a=2.10-roundtrip -o=2.10 -t=3.0 "$@" || return $?;; esac; } } tests="${tests} test2to3roundtrip" # -B # -D # -G ... 
see usage cts_scheduler() { _tcp_mode=0 _tcp_ret=0 _tcp_validatewith= _tcp_schema_o= _tcp_schema_t= _tcp_template= find ../cts/scheduler -name scheduler -o -type d -prune \ -o -name '*.xml' -print | env LC_ALL=C sort \ | { case " $* " in *\ -C\ *) test_cleaner -r;; *\ -S\ *) emit_result "not implemented" "option -S";; *\ -W\ *) emit_result "not implemented" "option -W";; *\ -X\ *) emit_result "not implemented" "option -X";; *) while test $# -gt 0; do case "$1" in -G) _tcp_mode=$((_tcp_mode | (1 << 0)));; -D) _tcp_mode=$((_tcp_mode | (1 << 1)));; -B) _tcp_mode=$((_tcp_mode | (1 << 2)));; esac shift done while read _tcp_origin; do _tcp_validatewith=$(${XSLTPROCESSOR} - "${_tcp_origin}" <<-EOF EOF ) _tcp_schema_t=${_tcp_validatewith} case "${_tcp_validatewith}" in 1) _tcp_schema_o=1.3;; 2) _tcp_schema_o=2.10;; # only for gradual refinement as upgrade-2.10.xsl under # active development, move to 3.x when schema v4 emerges 3) _tcp_schema_o=2.10 _tcp_schema_t=2;; *) emit_error \ "need to skip ${_tcp_origin} (schema: ${_tcp_validatewith})" continue;; esac _tcp_template="upgrade-${_tcp_schema_o}.xsl" _tcp_schema_t="pacemaker-$((_tcp_schema_t + 1)).0.rng" test "${_tcp_schema_o%%.*}" = "${_tcp_validatewith}" \ && _tcp_schema_o="pacemaker-${_tcp_schema_o}.rng" \ || _tcp_schema_o="${_tcp_schema_t}" # pre-validate if test "${_tcp_schema_o}" != "${_tcp_schema_t}" \ && ! test_runner_validate "${_tcp_schema_o}" "${_tcp_origin}"; then _tcp_ret=$((_tcp_ret + 1)); echo "E:pre-validate"; continue fi # upgrade test "$((_tcp_mode & (1 << 0)))" -ne 0 \ || ln -fs "$(pwd)/${_tcp_origin}" "${_tcp_origin%.*}.ref" if ! _tcp_target=$(test_runner_upgrade "${_tcp_template}" \ "${_tcp_origin}" "${_tcp_mode}"); then _tcp_ret=$((_tcp_ret + 1)); test -n "${_tcp_target}" || break echo "E:upgrade" test -s "${_tcp_target}" \ && { echo ---; cat "${_tcp_target}" || :; echo ---; } continue fi test "$((_tcp_mode & (1 << 0)))" -ne 0 \ || rm -f "${_tcp_origin%.*}.ref" # post-validate if ! test_runner_validate "${_tcp_schema_t}" "${_tcp_target}"; then _tcp_ret=$((_tcp_ret + 1)); echo "E:post-validate"; continue fi test "$((_tcp_mode & (1 << 0)))" -eq 0 \ || mv "${_tcp_target}" "${_tcp_origin}" done; log2_or_0_return ${_tcp_ret};; esac; } } tests="${tests} cts_scheduler" # # "framework" # # option-likes ... options to be passed down # argument-likes ... 
drives a test selection test_suite() { _ts_pass= _ts_select= _ts_select_full= _ts_test_specs= _ts_global_ret=0 _ts_ret=0 while test $# -gt 0; do case "$1" in -) printf '%s\n' 'waiting for tests specified at stdin...'; while read _ts_spec; do _ts_select="${_ts_spec}@$1"; done;; -*) _ts_pass="${_ts_pass} $1";; *) _ts_select_full="${_ts_select_full}@$1" _ts_select="${_ts_select}@${1%%/*}";; esac shift done _ts_select="${_ts_select}@" _ts_select_full="${_ts_select_full}@" for _ts_test in ${tests}; do _ts_test_specs= while true; do case "${_ts_select}" in *@${_ts_test}@*) _ts_test_specs="${_ts_select%%@${_ts_test}@*}"\ "@${_ts_select#*@${_ts_test}@}" if test "${_ts_test_specs}" = @; then _ts_select= # nothing left else _ts_select="${_ts_test_specs}" fi continue ;; @) case "${_ts_test}" in test*) break;; esac # filter ;; esac test -z "${_ts_test_specs}" || break continue 2 # move on to matching with next local test done _ts_test_specs= while true; do case "${_ts_select_full}" in *@${_ts_test}/*) _ts_test_full="${_ts_test}/${_ts_select_full#*@${_ts_test}/}" _ts_test_full="${_ts_test_full%%@*}" _ts_select_full="${_ts_select_full%%@${_ts_test_full}@*}"\ "@${_ts_select_full#*@${_ts_test_full}@}" _ts_test_specs="${_ts_test_specs} ${_ts_test_full#*/}" ;; *) break ;; esac done for _ts_test_spec in ${_ts_test_specs}; do printf '%s\n' "${_ts_test_spec}" done | "${_ts_test}" ${_ts_pass} || _ts_ret=$? test ${_ts_ret} = 0 \ && emit_result ${_ts_ret} "${_ts_test}" \ || emit_result "at least 2^$((_ts_ret - 1))" "${_ts_test}" log2_or_0_add ${_ts_global_ret} ${_ts_ret} _ts_global_ret=$? done if test -n "${_ts_select#@}"; then emit_error "Non-existing test(s):$(echo "${_ts_select}" \ | tr '@' ' ')" log2_or_0_add ${_ts_global_ret} 1 || _ts_global_ret=$? fi return ${_ts_global_ret} } # NOTE: big letters are dedicated for per-test-set behaviour, # small ones for generic/global behaviour usage() { printf \ '%s\n%s\n %s\n %s\n %s\n %s\n %s\n %s\n %s\n %s\n %s\n %s\n %s\n' \ "usage: $0 [-{B,C,D,G,S,X}]* \\" \ " [-|{${tests## }}*]" \ "- when no suites (arguments) provided, \"test*\" ones get used" \ "- with '-' suite specification the actual ones grabbed on stdin" \ "- use '-B' to run validate-only check suppressing blanks first" \ "- use '-C' to only cleanup ephemeral byproducts" \ "- use '-D' to review originals vs. \"referential\" outcomes" \ "- use '-G' to generate \"referential\" outcomes" \ "- use '-S' for template self-check (requires net access)" \ "- use '-W' to run browser-based, on-the-fly diff'ing test drive" \ "- use '-X' to show explanatory details about the upgrade" \ "- some modes (e.g. -{S,W}) take also '-r' for cleanup afterwards" \ "- test specification can be granular, e.g. 'test2to3/022'" printf \ '\n%s\n %s\n %s\n %s\n %s\n %s\n %s\n %s\n' \ 'environment variables affecting the run + default/current values:' \ "- DIFF (${DIFF}): tool to compute and show differences of 2 files" \ "- DIFFOPTS (${DIFFOPTS}): options to the above tool" \ "- DIFFPAGER (${DIFFPAGER}): possibly accompanying the above tool" \ "- RNGVALIDATOR (${RNGVALIDATOR}): RelaxNG validator" \ "- XSLTPROCESSOR (${_XSLTPROCESSOR}): XSLT 1.0 capable processor" \ "- HTTPPORT (${HTTPPORT}): port used by test drive HTTP server run" \ "- WEBBROWSER (${WEBBROWSER}): used for in-browser test drive" } main() { _main_pass= _main_bailout=0 _main_ret=0 while test $# -gt 0; do case "$1" in -h) usage; exit;; -C|-G|-S|-X) _main_bailout=1;; esac _main_pass="${_main_pass} $1" shift done test_suite ${_main_pass} || _main_ret=$? 
    test ${_main_bailout} -ne 0 \
        || test_suite -C ${_main_pass} >/dev/null || true
    test ${_main_ret} = 0 && emit_result ${_main_ret} "Overall suite" \
        || emit_result "at least 2^$((_main_ret - 1))" "Overall suite"
    return ${_main_ret}
}

main "$@"
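
For reference, a minimal Python sketch (not part of the patch) of how pcmk_simtimes interprets one line of "crm_simulate --profile" output: it reuses the script's PATTERN regular expression to pull out the test name and timing, then computes the absolute and percentage difference the same way compare_test() does. The sample timings below are made up for illustration.

    import re

    # Same pattern pcmk_simtimes.in uses to match lines such as
    #   " * Testing cts/scheduler/1360.xml ... 0.07 secs"
    PATTERN = r"^\s*\*\s+Testing\s+.*/([^/]+)\.xml\s+\.+\s+([.0-9]+)\s+secs\s*$"

    def parse(line):
        """Return (test name, seconds) or None if the line is not a timing line."""
        match = re.search(PATTERN, line)
        return (match.group(1), float(match.group(2))) if match else None

    # Hypothetical before/after profiling lines
    test, before_time = parse(" * Testing cts/scheduler/1360.xml ........ 0.07 secs")
    _, after_time = parse(" * Testing cts/scheduler/1360.xml ........ 0.09 secs")

    time_diff = after_time - before_time
    time_diff_percent = (time_diff / before_time) * 100
    print("%-40s %6.2fs vs %6.2fs (%+.2fs = %+6.2f%%)"
          % (test, before_time, after_time, time_diff, time_diff_percent))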
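
The regression.sh runners fold their per-test-set switches into a small bitmask (_tr_mode / _tru_mode / _tcp_mode): -G sets bit 0 (generate the "referential" outcomes), -D sets bit 1 (review originals against the referential outcomes), and -B sets bit 2 (deblanked, validate-only handling). A rough Python equivalent of that encoding, only to make the (1 << n) tests in the script easier to follow:

    # Bit meanings mirror regression.sh's -G, -D, -B switches
    GENERATE = 1 << 0   # -G: (re)generate "referential" outcomes
    REVIEW   = 1 << 1   # -D: review originals vs. referential outcomes
    DEBLANK  = 1 << 2   # -B: suppress insignificant blanks before comparing

    def parse_mode(args):
        mode = 0
        for arg in args:
            if arg == "-G":
                mode |= GENERATE
            elif arg == "-D":
                mode |= REVIEW
            elif arg == "-B":
                mode |= DEBLANK
        return mode

    mode = parse_mode(["-G", "-B"])
    print(mode, bool(mode & GENERATE), bool(mode & REVIEW), bool(mode & DEBLANK))
    # 5 True False True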
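
The heart of test_runner() is a three-step pipeline per input file: pre-validate against the origin RelaxNG schema, apply the upgrade-*.xsl transform, then post-validate against the target schema. A condensed Python sketch of the same flow, assuming lxml is installed and that the schema/stylesheet files named below exist in the working directory (the test file path is likewise hypothetical):

    from lxml import etree

    def upgrade_and_check(xml_path):
        doc = etree.parse(xml_path)

        # pre-validate against the origin schema
        if not etree.RelaxNG(etree.parse("pacemaker-2.10.rng")).validate(doc):
            return "E:pre-validate"

        # upgrade via the XSLT transform
        upgraded = etree.XSLT(etree.parse("upgrade-2.10.xsl"))(doc)

        # post-validate against the target schema
        if not etree.RelaxNG(etree.parse("pacemaker-3.0.rng")).validate(upgraded):
            return "E:post-validate"
        return "OK"

    print(upgrade_and_check("test-2/022.xml"))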
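
Each test phase reduces its raw error count to a return status via log2_or_0_return(): 0 stays 0, and any count from 1 to 255 becomes 1 + floor(log2(count)), capped at 7 once the count exceeds 127. That is why the summaries can only promise "at least 2^(status - 1)" errors. A short Python rendering of the same mapping (a rephrasing of the shell arithmetic, not code from the patch):

    def log2_or_0_return(count):
        """Collapse an error count the way regression.sh's shell arithmetic does."""
        if count == 0:
            return 0
        return min(count.bit_length(), 7)   # bit_length(n) == 1 + floor(log2(n))

    def summary(status):
        return "OK" if status == 0 else "at least 2^%d error(s)" % (status - 1)

    for count in (0, 1, 2, 5, 130):
        status = log2_or_0_return(count)
        print("%3d errors -> status %d (%s)" % (count, status, summary(status)))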