diff --git a/GNUmakefile b/GNUmakefile index 60de623287..58ee17e3eb 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,381 +1,381 @@ # -# Copyright 2008-2018 Andrew Beekhof +# Copyright 2008-2019 Andrew Beekhof # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # default: $(shell test ! -e configure && echo init) $(shell test -e configure && echo core) -include Makefile PACKAGE ?= pacemaker # Force 'make dist' to be consistent with 'make export' distdir = $(PACKAGE)-$(TAG) TARFILE = $(PACKAGE)-$(SHORTTAG).tar.gz DIST_ARCHIVES = $(TARFILE) RPM_ROOT = $(shell pwd) RPM_OPTS = --define "_sourcedir $(RPM_ROOT)" \ --define "_specdir $(RPM_ROOT)" \ --define "_srcrpmdir $(RPM_ROOT)" \ MOCK_OPTIONS ?= --resultdir=$(RPM_ROOT)/mock --no-cleanup-after # Default to building Fedora-compliant spec files # SLES: /etc/SuSE-release # openSUSE: /etc/SuSE-release # RHEL: /etc/redhat-release, /etc/system-release # Fedora: /etc/fedora-release, /etc/redhat-release, /etc/system-release # CentOS: /etc/centos-release, /etc/redhat-release, /etc/system-release F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora}) ARCH ?= $(shell test -e /etc/fedora-release && rpm --eval %{_arch}) MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH)) DISTRO ?= $(shell test -e /etc/SuSE-release && echo suse; echo fedora) COMMIT ?= HEAD TAG ?= $(shell T=$$(git describe --all '$(COMMIT)' | sed -n 's|tags/\(.*\)|\1|p'); \ test -n "$${T}" && echo "$${T}" \ || git log --pretty=format:%H -n 1 '$(COMMIT)') lparen = ( rparen = ) SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*$(rparen) echo '$(TAG)' | cut -c11-;; \ *$(rparen) git log --pretty=format:%h -n 1 '$(TAG)';; esac) SHORTTAG_ABBREV = $(shell printf %s '$(SHORTTAG)' | wc -c) WITH ?= --without doc #WITH ?= --without=doc --with=gcov LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1) ifneq ($(origin VERSION), undefined) LAST_RELEASE ?= Pacemaker-$(VERSION) else LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1) endif NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}') BUILD_COUNTER ?= build.counter LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER)) COUNT = $(shell expr 1 + $(LAST_COUNT)) SPECVERSION ?= $(COUNT) # toplevel rsync destination for www targets (without trailing slash) RSYNC_DEST ?= root@www.clusterlabs.org:/var/www/html # recursive, preserve symlinks/permissions/times, verbose, compress, # don't cross filesystems, sparse, show progress RSYNC_OPTS = -rlptvzxS --progress # rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros # # Unfortunately, at least recent versions of rpm do not support mentioned # switch. To work this around, we can emulate mechanism that rpm uses # internally: unfold the flags into respective macro definitions: # # --with[out] FOO -> --define "_with[out]_FOO --with[out]-FOO" # -# $(1) ... WITH string (e.g., --with pre_release --without cman) +# $(1) ... WITH string (e.g., --with pre_release --without doc) # $(2) ... options following the initial "rpmbuild" in the command # $(3) ... final arguments determined with $2 (e.g., pacemaker.spec) # # Note that if $(3) is a specfile, extra case is taken so as to reflect # pcmkversion correctly (using in-place modification). 
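# As a rough sketch of what this unfolding looks like in practice (reusing
# the example WITH values that appear elsewhere in this file, not a new
# target): a run such as
#
#   make WITH="--with pre_release --without doc" rpm
#
# should lead the wrapper below to extend its initial "rpmbuild $(2)"
# command with approximately
#
#   --define "_with_pre_release --with-pre_release" \
#   --define "_without_doc --without-doc"
#
# before appending $(3) and evaluating the result; the exact quoting is
# handled by the shell loop in the define.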
# # Also note that both ways to specify long option with an argument # (i.e., what getopt and, importantly, rpm itself support) can be used: # # --with FOO # --with=FOO rpmbuild-with = \ WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \ CMD='rpmbuild $(2)'; PREREL=0; \ eval set -- "$${WITH}"; \ while true; do \ case "$$1" in \ --with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \ [ "$$2" != pre_release ] || PREREL=1; shift 2;; \ --without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \ [ "$$2" != pre_release ] || PREREL=0; shift 2;; \ --) shift ; break ;; \ *) echo "cannot parse WITH: $$1"; exit 1;; \ esac; \ done; \ case "$(3)" in \ *.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \ && sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \ || sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \ esac; \ CMD="$${CMD} $(3)"; \ eval "$${CMD}" init: ./autogen.sh init export: rm -f $(PACKAGE)-dirty.tar.* $(PACKAGE)-tip.tar.* $(PACKAGE)-HEAD.tar.* if [ ! -f $(TARFILE) ]; then \ rm -f $(PACKAGE).tar.*; \ if [ $(TAG) = dirty ]; then \ git commit -m "DO-NOT-PUSH" -a; \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" HEAD^{tree}; \ git reset --mixed HEAD^; \ else \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \ fi; \ echo `date`: Rebuilt $(TARFILE); \ else \ echo `date`: Using existing tarball: $(TARFILE); \ fi $(PACKAGE)-opensuse.spec: $(PACKAGE)-suse.spec cp $^ $@ @echo Rebuilt $@ $(PACKAGE)-suse.spec: $(PACKAGE).spec.in GNUmakefile rm -f $@ if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \ cp $(PACKAGE).spec.in $@; \ echo "Rebuilt $@ (local modifications)"; \ elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \ cp $(PACKAGE).spec.in $@; \ echo "Rebuilt $@"; \ else \ git show $(TAG):$(PACKAGE).spec.in >> $@; \ echo "Rebuilt $@ from $(TAG)"; \ fi sed -i \ -e 's:%{_docdir}/%{name}:%{_docdir}/%{name}-%{version}:g' \ -e 's:%{name}-libs:lib%{name}3:g' \ -e 's: libtool-ltdl-devel\(%{?_isa}\)\?::g' \ -e 's:bzip2-devel:libbz2-devel:g' \ -e 's:docbook-style-xsl:docbook-xsl-stylesheets:g' \ -e 's: byacc::g' \ -e 's:gnutls-devel:libgnutls-devel:g' \ -e 's:corosynclib:libcorosync:g' \ -e 's:cluster-glue-libs:libglue:g' \ -e 's:shadow-utils:shadow:g' \ -e 's: publican::g' \ -e 's: 189: 90:g' \ -e 's:%{_libexecdir}/lcrso:%{_libdir}/lcrso:g' \ -e 's:procps-ng:procps:g' \ $@ @echo "Applied SUSE-specific modifications" # Works for all fedora based distros $(PACKAGE)-%.spec: $(PACKAGE).spec.in rm -f $@ if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \ cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \ echo "Rebuilt $@ (local modifications)"; \ elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \ cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \ echo "Rebuilt $@"; \ else \ git show $(TAG):$(PACKAGE).spec.in >> $(PACKAGE)-$*.spec; \ echo "Rebuilt $@ from $(TAG)"; \ fi srpm-%: export $(PACKAGE)-%.spec rm -f *.src.rpm cp $(PACKAGE)-$*.spec $(PACKAGE).spec echo "* $(shell date +"%a %b %d %Y") Andrew Beekhof $(shell git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)-1" >> $(PACKAGE).spec echo " - See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for full details" >> $(PACKAGE).spec if [ -e $(BUILD_COUNTER) ]; then \ echo $(COUNT) > $(BUILD_COUNTER); \ fi sed -e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \ -e 
's/global\ commit\ .*/global\ commit\ $(TAG)/' \ -e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SHORTTAG_ABBREV)/' \ -i $(PACKAGE).spec $(call rpmbuild-with,$(WITH),-bs --define "dist .$*" $(RPM_OPTS),$(PACKAGE).spec) chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG) echo "Done" mock-next: make F=$(shell expr 1 + $(F)) mock mock-rawhide: make F=rawhide mock mock-install-%: echo "Installing packages" mock --root=$* $(MOCK_OPTIONS) --install $(RPM_ROOT)/mock/*.rpm vi sudo valgrind lcov gdb fence-agents psmisc mock-install: mock-install-$(MOCK_CFG) echo "Done" mock-sh: mock-sh-$(MOCK_CFG) echo "Done" mock-sh-%: echo "Connecting" mock --root=$* $(MOCK_OPTIONS) --shell echo "Done" -# eg. WITH="--with cman" make rpm +# eg. make WITH="--with pre_release" rpm mock-%: make srpm-$(firstword $(shell echo $(@:mock-%=%) | tr '-' ' ')) -rm -rf $(RPM_ROOT)/mock @echo "mock --root=$* --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm" mock --root=$* --no-cleanup-after --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm srpm: srpm-$(DISTRO) echo "Done" mock: mock-$(MOCK_CFG) echo "Done" rpm-dep: $(PACKAGE)-$(DISTRO).spec if [ x != x`which yum-builddep 2>/dev/null` ]; then \ echo "Installing with yum-builddep"; \ sudo yum-builddep $(PACKAGE)-$(DISTRO).spec; \ elif [ x != x`which yum 2>/dev/null` ]; then \ echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \ sudo yum install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' '); \ elif [ x != x`which zypper` ]; then \ echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \ sudo zypper install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ');\ else \ echo "I don't know how to install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')";\ fi rpm: srpm @echo To create custom builds, edit the flags and options in $(PACKAGE).spec first $(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_ROOT)/*.src.rpm) release: make TAG=$(LAST_RELEASE) rpm rc: make TAG=$(LAST_RC) rpm dirty: make TAG=dirty mock COVERITY_DIR = $(shell pwd)/coverity-$(TAG) COVFILE = $(PACKAGE)-coverity-$(TAG).tgz COVHOST ?= scan5.coverity.com COVPASS ?= password # Public coverity coverity: test -e configure || ./autogen.sh test -e Makefile || ./configure make core-clean rm -rf $(COVERITY_DIR) cov-build --dir $(COVERITY_DIR) make core tar czf $(COVFILE) --transform=s@.*$(TAG)@cov-int@ $(COVERITY_DIR) @echo "Uploading to public Coverity instance..." curl --form file=@$(COVFILE) --form project=$(PACKAGE) --form password=$(COVPASS) --form email=andrew@beekhof.net http://$(COVHOST)/cgi-bin/upload.py rm -rf $(COVFILE) $(COVERITY_DIR) coverity-corp: test -e configure || ./autogen.sh test -e Makefile || ./configure make core-clean rm -rf $(COVERITY_DIR) cov-build --dir $(COVERITY_DIR) make core @echo "Waiting for a corporate Coverity license..." 
cov-analyze --dir $(COVERITY_DIR) --wait-for-license cov-format-errors --dir $(COVERITY_DIR) --emacs-style > $(TAG).coverity cov-format-errors --dir $(COVERITY_DIR) rsync $(RSYNC_OPTS) "$(COVERITY_DIR)/c/output/errors/" "$(RSYNC_DEST)/$(PACKAGE)/coverity/$(TAG)/" make core-clean # cov-commit-defects --host $(COVHOST) --dir $(COVERITY_DIR) --stream $(PACKAGE) --user auto --password $(COVPASS) rm -rf $(COVERITY_DIR) global: clean-generic gtags -q global-upload: global htags -sanhIT rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/$(PACKAGE)/global/$(TAG)/" %.8.html: %.8 echo groff -mandoc `man -w ./$<` -T html > $@ groff -mandoc `man -w ./$<` -T html > $@ rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/" %.7.html: %.7 echo groff -mandoc `man -w ./$<` -T html > $@ groff -mandoc `man -w ./$<` -T html > $@ rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/" manhtml-upload: all find . -name "[a-z]*.[78]" -exec make \{\}.html \; doxygen: Doxyfile doxygen Doxyfile doxygen-upload: doxygen rsync $(RSYNC_OPTS) doc/api/html/ "$(RSYNC_DEST)/$(PACKAGE)/doxygen/$(TAG)/" abi: ./abi-check pacemaker $(LAST_RELEASE) $(TAG) abi-www: export RSYNC_DEST=$(RSYNC_DEST); ./abi-check -u pacemaker $(LAST_RELEASE) $(TAG) www: manhtml-upload global-upload doxygen-upload make RSYNC_DEST=$(RSYNC_DEST) -C doc www summary: @printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)-1" @printf "\n- Changesets: `git log --pretty=oneline $(LAST_RELEASE)..HEAD | wc -l`" @printf "\n- Diff: " @git diff $(LAST_RELEASE)..HEAD --shortstat include lib daemons tools xml rc-changes: @make NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes changes: summary @printf "\n- Features added since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf @printf "\n- Changes since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \ | grep -e High: -e Fix: -e Bug | sed \ -e 's@\(Fix\|High\|Bug\):@@' \ -e 's@\(cib\|pacemaker-based\|based\):@CIB:@' \ -e 's@\(crmd\|pacemaker-controld\|controld\):@controller:@' \ -e 's@\(lrmd\|pacemaker-execd\|execd\):@executor:@' \ -e 's@\(Fencing\|stonithd\|stonith\|pacemaker-fenced\|fenced\):@fencing:@' \ -e 's@\(PE\|pengine\|pacemaker-schedulerd\|schedulerd\):@scheduler:@' \ | sort -uf changelog: @make changes > ChangeLog @printf "\n">> ChangeLog git show $(LAST_RELEASE):ChangeLog >> ChangeLog DO_NOT_INDENT = lib/gnu daemons/controld/controld_fsa.h indent: find . -name "*.[ch]" -exec ./p-indent \{\} \; git co HEAD $(DO_NOT_INDENT) rel-tags: tags find . -name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \; CLANG_analyzer = $(shell which scan-build) CLANG_checkers = # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} # --inconclusive --std=posix CPPCHECK_ARGS ?= cppcheck: cppcheck $(CPPCHECK_ARGS) -I include --max-configs=25 -q replace lib daemons tools clang: test -e $(CLANG_analyzer) scan-build $(CLANG_checkers:%=-enable-checker %) make clean all # V3 = scandir unsetenv alphasort xalloc # V2 = setenv strerror strchrnul strndup # http://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import GNU_MODS = crypto/md5 gnulib-update: -test ! 
-e gnulib && git clone git://git.savannah.gnu.org/gnulib.git cd gnulib && git pull gnulib/gnulib-tool --source-base=lib/gnu --lgpl=2 --no-vc-files --import $(GNU_MODS) diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index 8b6b2ac9b7..6bf16daa63 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1296 +1,1297 @@ #!@BASH_PATH@ # # Copyright 2004-2018 Andrew Beekhof # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # USAGE_TEXT="Usage: cts-scheduler [] Options: --help Display this text, then exit -V, --verbose Display any differences from expected output --run TEST Run only single specified test --update Update expected results with actual results -b, --binary PATH Specify path to crm_simulate -i, --io-dir PATH Specify path to regression test data directory -v, --valgrind Run all commands under valgrind --valgrind-dhat Run all commands under valgrind with heap analyzer --valgrind-skip-output If running under valgrind, don't display output --testcmd-options Additional options for command under test" SBINDIR="@sbindir@" BUILDDIR="@abs_top_builddir@" CRM_SCHEMA_DIRECTORY="@CRM_SCHEMA_DIRECTORY@" # If readlink supports -e (i.e. GNU), use it readlink -e / >/dev/null 2>/dev/null if [ $? -eq 0 ]; then test_home="$(dirname "$(readlink -e "$0")")" else test_home="$(dirname "$0")" fi io_dir="$test_home/scheduler" failed="$test_home/.regression.failed.diff" test_binary= testcmd_options= single_test= verbose=0 num_failed=0 num_tests=0 VALGRIND_CMD="" VALGRIND_OPTS="-q --gen-suppressions=all --log-file=%q{valgrind_output} --time-stamp=yes --trace-children=no --show-reachable=no --leak-check=full --num-callers=20 --suppressions=$test_home/valgrind-pcmk.suppressions" VALGRIND_DHAT_OPTS="--tool=exp-dhat --log-file=%q{valgrind_output} --time-stamp=yes --trace-children=no --show-top-n=100 --num-callers=4" diff_opts="--ignore-all-space --ignore-blank-lines -u -N" # These constants must track crm_exit_t values CRM_EX_OK=0 CRM_EX_ERROR=1 CRM_EX_NOT_INSTALLED=5 CRM_EX_USAGE=64 CRM_EX_NOINPUT=66 EXITCODE=$CRM_EX_OK function info() { printf "$*\n" } function error() { printf " * ERROR: $*\n" } function failed() { printf " * FAILED: $*\n" } function show_test() { name=$1; shift printf " Test %-25s $*\n" "$name:" } # Normalize scheduler output for comparison normalize() { for NORMALIZE_FILE in "$@"; do # sed -i is not portable :-( sed -e 's/crm_feature_set="[^"]*"//' \ -e 's/batch-limit="[0-9]*"//' \ "$NORMALIZE_FILE" > "${NORMALIZE_FILE}.$$" mv -- "${NORMALIZE_FILE}.$$" "$NORMALIZE_FILE" done } info "Test home is:\t$test_home" create_mode="false" while [ $# -gt 0 ] ; do case "$1" in -V|--verbose) verbose=1 shift ;; -v|--valgrind) export G_SLICE=always-malloc VALGRIND_CMD="valgrind $VALGRIND_OPTS" shift ;; --valgrind-dhat) VALGRIND_CMD="valgrind $VALGRIND_DHAT_OPTS" shift ;; --valgrind-skip-output) VALGRIND_SKIP_OUTPUT=1 shift ;; --update) create_mode="true" shift ;; --run) single_test=$(basename "$2" ".xml") shift 2 break # any remaining arguments will be passed to test command ;; -b|--binary) test_binary="$2" shift 2 ;; -i|--io-dir) io_dir="$2" shift 2 ;; --help) echo "$USAGE_TEXT" exit $CRM_EX_OK ;; --testcmd-options) testcmd_options=$2 shift 2 ;; *) error "unknown option: $1" exit $CRM_EX_USAGE ;; esac done if [ -z "$PCMK_schema_directory" ]; then if [ -d "$BUILDDIR/xml" ]; then export PCMK_schema_directory="$BUILDDIR/xml" elif [ -d "$CRM_SCHEMA_DIRECTORY" ]; then export PCMK_schema_directory="$CRM_SCHEMA_DIRECTORY" fi 
fi if [ -z "$test_binary" ]; then if [ -x "$BUILDDIR/tools/crm_simulate" ]; then test_binary="$BUILDDIR/tools/crm_simulate" elif [ -x "$SBINDIR/crm_simulate" ]; then test_binary="$SBINDIR/crm_simulate" fi fi if [ ! -x "$test_binary" ]; then error "Test binary $test_binary not found" exit $CRM_EX_NOT_INSTALLED fi info "Test binary is:\t$test_binary" if [ -n "$PCMK_schema_directory" ]; then info "Schema home is:\t$PCMK_schema_directory" fi if [ "x$VALGRIND_CMD" != "x" ]; then info "Activating memory testing with valgrind"; fi info " " test_cmd="$VALGRIND_CMD $test_binary $testcmd_options" #echo $test_cmd if [ "$(whoami)" != "root" ]; then declare -x CIB_shadow_dir=/tmp fi do_test() { did_fail=0 expected_rc=0 num_tests=$(( $num_tests + 1 )) base=$1; shift name=$1; shift input=$io_dir/${base}.xml output=$io_dir/${base}.out expected=$io_dir/${base}.exp dot_expected=$io_dir/${base}.dot dot_output=$io_dir/${base}.pe.dot scores=$io_dir/${base}.scores score_output=$io_dir/${base}.scores.pe stderr_expected=$io_dir/${base}.stderr stderr_output=$io_dir/${base}.stderr.pe summary=$io_dir/${base}.summary summary_output=$io_dir/${base}.summary.pe valgrind_output=$io_dir/${base}.valgrind export valgrind_output if [ "x$1" = "x--rc" ]; then expected_rc=$2 shift; shift; fi show_test "$base" "$name" if [ ! -f $input ]; then error "No input"; did_fail=1 num_failed=$(( $num_failed + 1 )) return $CRM_EX_NOINPUT; fi if [ "$create_mode" != "true" ] && [ ! -f "$expected" ]; then error "no stored output"; return $CRM_EX_NOINPUT; fi # ../admin/crm_verify -X $input if [ ! -z "$single_test" ]; then echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -S" "$@" CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \ -G "$output" -S "$@" 2>&1 | tee "$summary_output" else CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -S &> "$summary_output" fi CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \ -G "$output" -SQ -s "$@" 2> "$stderr_output" > "$score_output" rc=$? if [ $rc -ne $expected_rc ]; then failed "Test returned: $rc"; did_fail=1 echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -SQ -s" "$@" fi if [ -z "$VALGRIND_SKIP_OUTPUT" ]; then if [ -s "${valgrind_output}" ]; then error "Valgrind reported errors"; did_fail=1 cat ${valgrind_output} fi rm -f ${valgrind_output} fi if [ -s core ]; then error "Core-file detected: core.${base}"; did_fail=1 rm -f $test_home/core.$base mv core $test_home/core.$base fi if [ -e "$stderr_expected" ]; then diff $diff_opts $stderr_expected $stderr_output >/dev/null rc2=$? if [ $rc2 -ne 0 ]; then failed "stderr changed"; diff $diff_opts $stderr_expected $stderr_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi elif [ -s "$stderr_output" ]; then error "Output was written to stderr" did_fail=1 cat $stderr_output fi rm -f $stderr_output if [ ! -s $output ]; then error "No graph produced"; did_fail=1 num_failed=$(( $num_failed + 1 )) rm -f $output return $CRM_EX_ERROR; fi if [ ! -s $dot_output ]; then error "No dot-file summary produced"; did_fail=1 num_failed=$(( $num_failed + 1 )) rm -f $output return $CRM_EX_ERROR; else echo "digraph \"g\" {" > $dot_output.sort LC_ALL=POSIX sort -u $dot_output | grep -v -e '^}$' -e digraph >> $dot_output.sort echo "}" >> $dot_output.sort mv -f $dot_output.sort $dot_output fi if [ ! 
-s $score_output ]; then error "No allocation scores produced"; did_fail=1 num_failed=$(( $num_failed + 1 )) rm $output return $CRM_EX_ERROR; else LC_ALL=POSIX sort $score_output > $score_output.sorted mv -f $score_output.sorted $score_output fi if [ "$create_mode" = "true" ]; then cp "$output" "$expected" cp "$dot_output" "$dot_expected" cp "$score_output" "$scores" cp "$summary_output" "$summary" info " Updated expected outputs" fi diff $diff_opts $summary $summary_output >/dev/null rc2=$? if [ $rc2 -ne 0 ]; then failed "summary changed"; diff $diff_opts $summary $summary_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi diff $diff_opts $dot_expected $dot_output >/dev/null rc=$? if [ $rc -ne 0 ]; then failed "dot-file summary changed"; diff $diff_opts $dot_expected $dot_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 else rm -f $dot_output fi normalize "$expected" "$output" diff $diff_opts $expected $output >/dev/null rc2=$? if [ $rc2 -ne 0 ]; then failed "xml-file changed"; diff $diff_opts $expected $output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi diff $diff_opts $scores $score_output >/dev/null rc=$? if [ $rc -ne 0 ]; then failed "scores-file changed"; diff $diff_opts $scores $score_output 2>/dev/null >> $failed echo "" >> $failed did_fail=1 fi rm -f $output $score_output $summary_output if [ $did_fail -eq 1 ]; then num_failed=$(( $num_failed + 1 )) return $CRM_EX_ERROR fi return $CRM_EX_OK } function test_results { if [ $num_failed -ne 0 ]; then if [ -s "$failed" ]; then if [ $verbose -eq 1 ]; then error "Results of $num_failed failed tests (out of $num_tests):" cat $failed else error "Results of $num_failed failed tests (out of $num_tests) are in $failed" error "Use -V to display them after running the tests" fi else error "$num_failed (of $num_tests) tests failed (no diff results)" rm $failed fi EXITCODE=$CRM_EX_ERROR fi } # zero out the error log true > $failed if [ -n "$single_test" ]; then do_test "$single_test" "Single shot" "$@" TEST_RC=$? 
cat "$failed" exit $TEST_RC fi DO_VERSIONED_TESTS=0 info Performing the following tests from $io_dir echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "Negative group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" do_test bug-lf-2613 "Move group on failure" do_test bug-lf-2619 "Move group on clone failure" do_test group-fail "Ensure stop order is preserved for partially active groups" do_test group-unmanaged "No need to restart r115 because r114 is unmanaged" do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails" do_test group-dependents "Account for the location preferences of things colocated with a group" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must (running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" do_test orphan-2 "Orphan stop, remove failcount" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test params-5 "Params: Restart based on probe digest" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" do_test params-6 "Params: Detect reload in previously migrated resource" do_test nvpair-id-ref "Support id-ref in nvpair with optional name" do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed" do_test reload-becomes-restart "Cancel reload if restart becomes required" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : master" do_test target-2 "Target Role : invalid" echo "" do_test base-score "Set a node's default score for all nodes" echo "" do_test date-1 "Dates" -t "2005-020" do_test date-2 "Date Spec - Pass" -t "2005-020T12:30" do_test date-3 "Date Spec - Fail" -t "2005-020T11:30" do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test probe-3 "Probe (pending node)" do_test probe-4 "Probe (pending node + stopped resource)" do_test standby "Standby" do_test comments "Comments" echo "" do_test one-or-more-0 "Everything starts" do_test one-or-more-1 "Nothing starts because of A" do_test one-or-more-2 "D can 
start because of C" do_test one-or-more-3 "D cannot start because of B and C" do_test one-or-more-4 "D cannot start because of target-role" do_test one-or-more-5 "Start A and F even though C and D are stopped" do_test one-or-more-6 "Leave A running even though B is stopped" do_test one-or-more-7 "Leave A running even though C is stopped" do_test bug-5140-require-all-false "Allow basegrp:0 to stop" do_test clone-require-all-1 "clone B starts node 3 and 4" do_test clone-require-all-2 "clone B remains stopped everywhere" do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere" do_test clone-require-all-4 "clone B remains on node 3 and 4 with only one instance of A remaining." do_test clone-require-all-5 "clone B starts on node 1 3 and 4" do_test clone-require-all-6 "clone B remains active after shutting down instances of A" do_test clone-require-all-7 "clone A and B both start at the same time. all instances of A start before B." do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B" do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B" do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another." do_test one-or-more-unrunnable-instances "Avoid dependencies on instances that won't ever be started" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (mandatory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependent clones" do_test order-sets "Ordering for resource sets" do_test order-serialize "Serialize resources without inhibiting migration" do_test order-serialize-set "Serialize a set of resources without inhibiting migration" do_test clone-order-primitive "Order clone start after a primitive" do_test clone-order-16instances "Verify ordering of 16 cloned resources" do_test order-optional-keyword "Order (optional keyword)" do_test order-mandatory "Order (mandatory keyword)" do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones" do_test ordered-set-basic-startup "Constraint set with default order settings." do_test ordered-set-natural "Allow natural set ordering" do_test order-wrong-kind "Order (error)" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" do_test coloc-negative-group "Negative colocation with a group" do_test coloc-intra-set "Intra-set colocation" do_test bug-lf-2435 "Colocation sets with a negative score" do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop" do_test coloc_fp_logic "Verify floating point calculations in colocation are working" do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc." do_test colo_slave_w_native "cl#5070 - Verify promotion order is affected when colocating slave to native rsc." 
do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" do_test anti-colocation-master "Organize order of actions for master resources in anti-colocations" do_test anti-colocation-slave "Organize order of actions for slave resources in anti-colocations" do_test enforce-colo1 "Always enforce B with A INFINITY." do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)" echo "" do_test rsc-sets-seq-true "Resource Sets - sequential=false" do_test rsc-sets-seq-false "Resource Sets - sequential=true" do_test rsc-sets-clone "Resource Sets - Clone" do_test rsc-sets-master "Resource Sets - Master" do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " do_test per-node-attrs "Per node resource parameters" echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor" do_test stop-failure-no-quorum "Stop failure without quorum" do_test stop-failure-no-fencing "Stop failure without fencing available" do_test stop-failure-with-fencing "Stop failure with fencing available" do_test multiple-active-block-group "Support of multiple-active=block for resource groups" do_test multiple-monitor-one-failed "Consider resource failed if any of the configured monitor operations failed" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze" do_test suicide-needed-inquorate "no-quorum-policy=suicide: suicide necessary" do_test suicide-not-needed-initial-quorum "no-quorum-policy=suicide: suicide not necessary at initial quorum" do_test suicide-not-needed-never-quorate "no-quorum-policy=suicide: suicide not necessary if never quorate" do_test suicide-not-needed-quorate "no-quorum-policy=suicide: suicide necessary if quorate" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - 
ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-begin "Normal migration" do_test migrate-success "Completed migration" do_test migrate-partial-1 "Completed migration, missing stop on source" do_test migrate-partial-2 "Successful migrate_to only" do_test migrate-partial-3 "Successful migrate_to only, target down" do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from" do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership" do_test migrate-fail-2 "Failed migrate_from" do_test migrate-fail-3 "Failed migrate_from + stop on source" do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-5 "Failed migrate_from + stop on source and target" do_test migrate-fail-6 "Failed migrate_to" do_test migrate-fail-7 "Failed migrate_to + stop on source" do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" do_test migrate-fail-9 "Failed migrate_to + stop on source and target" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-stop-start-complex "Migration in a complex moving stack" do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" do_test migrate-fencing "Migration after Fencing" do_test migrate-both-vms "Migrate two VMs that have no colocation" do_test migration-behind-migrating-remote "Migrate resource behind migrating remote connection" do_test 1-a-then-bm-move-b "Advanced migrate logic. A then B. migrate B." do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B" do_test 3-am-then-bm-both-migrate "Advanced migrate logic. A then B. migrate both" do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable" do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic. A then B. 
move both, a not migratable" do_test 6-migrate-group "Advanced migrate logic, migrate a group" do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false" do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping" do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping" do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A" do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping" do_test a-promote-then-b-migrate "A promote then B start. migrate B" do_test a-demote-then-b-migrate "A demote then B stop. migrate B" if [ $DO_VERSIONED_TESTS -eq 1 ]; then do_test migrate-versioned "Disable migration for versioned resources" fi #echo "" #do_test complex1 "Complex " do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test clone-anon-failcount "Merge failcounts for anonymous clones" do_test force-anon-clone-max "Update clone-max properly when forcing a clone to be anonymous" do_test anon-instance-pending "Assign anonymous clone instance numbers properly when action pending" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder" do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Don't shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" do_test bug-lf-2153 "Clone ordering constraints" do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone" do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)" do_test clone-colocate-instance-2 "Colocation with a specific clone instance" do_test clone-order-instance "Ordering with specific clone instances" do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation" do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" do_test bug-lf-2544 "Balanced clone placement" do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0" do_test bug-lf-2574 "Avoid clone shuffle" do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start" do_test 
bug-cl-5168 "Don't shuffle clones" do_test bug-cl-5170 "Prevent clone from starting with on-fail=block" do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block" do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" do_test rebalance-unique-clones "Rebalance unique clone instances with no stickiness" do_test clone-requires-quorum-recovery "Clone with requires=quorum on failed node needing recovery" do_test clone-requires-quorum "Clone with requires=quorum with presumed-inactive instance on failed node" echo "" do_test cloned_start_one "order first clone then clone... first clone_min=2" do_test cloned_start_two "order first clone then clone... first clone_min=2" do_test cloned_stop_one "order first clone then clone... first clone_min=2" do_test cloned_stop_two "order first clone then clone... first clone_min=2" do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_interleave_stop_one "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_interleave_stop_two "order first clone then clone... first clone_min=2 and then has interleave=true" do_test clone_min_start_one "order first clone then primitive... first clone_min=2" do_test clone_min_start_two "order first clone then primitive... first clone_min=2" do_test clone_min_stop_all "order first clone then primitive... first clone_min=2" do_test clone_min_stop_one "order first clone then primitive... first clone_min=2" do_test clone_min_stop_two "order first clone then primitive... 
first clone_min=2" echo "" do_test unfence-startup "Clean unfencing" do_test unfence-definition "Unfencing when the agent changes" do_test unfence-parameters "Unfencing when the agent parameters changes" do_test unfence-device "Unfencing when a cluster has only fence devices" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depends on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (dont stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Don't retry failed demote actions" do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints" do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" do_test bug-lf-2358 "Master-Master anti-colocation" do_test master-promotion-constraint "Mandatory master colocation constraints" do_test unmanaged-master "Ensure role is preserved for unmanaged resources" do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters" do_test master-demote-2 "Demote does not clear past failure" do_test master-move "Move master based on failure of colocated group" do_test master-probed-score "Observe the promotion score of probed resources" do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint" do_test colocation_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint" do_test order_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by order constraint" do_test order_constraint_stops_slave "cl#5054 - Ensure slave is not demoted when stopped by order constraint" do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion." 
do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive" do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score" do_test master-demote-block "Block promotion if demote fails with on-fail=block" do_test master-dependent-ban "Don't stop instances from being active because a dependent is banned from that host" do_test master-stop "Stop instances due to location constraint with role=Started" do_test master-partially-demoted-group "Allow partially demoted group to finish demoting" do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced" do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted" do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering" do_test master-notify "Master promotion with notifies" do_test master-score-startup "Use permanent master scores without LRM history" do_test failed-demote-recovery "Recover resource in slave role after demote fails" do_test failed-demote-recovery-master "Recover resource in master role after demote fails" echo "" do_test history-1 "Correctly parse stateful-1 resource state" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource" do_test bug-5028-detach "Ensure detach still works" do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any depending resource is unmanaged " do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged " do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged " do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged " do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged" echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependency restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" do_test notifs-for-unrunnable "Don't schedule notifications for an unrunnable action" do_test route-remote-notify "Route remote notify actions through correct cluster node" do_test notify-behind-stopping-remote "Don't schedule notifications behind stopped remote" echo "" do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition" do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" do_test 696 "OSDL #696 - CRM starts stonith RA without monitor" do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" do_test 797 "OSDL #797 - Assert triggered: 
task_id_i > max_call_id" do_test 829 "OSDL #829" do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" do_test 994-2 "OSDL #994 - with a dependent resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test unrunnable-2 "Unrunnable 2" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test stonith-4 "Stonith node state" +do_test dc-fence-ordering "DC needs fencing while other nodes are shutting down" do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Don't promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" do_test colocate-primitive-with-clone "Optional colocation with a clone" do_test use-after-free-merge "Use-after-free in native_merge_weights" do_test bug-lf-2551 "STONITH ordering for stop" do_test bug-lf-2606 "Stonith implies demote" do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults" do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering" do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false" do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false" do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts." do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false" do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false." do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false." do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false." do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false" do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true" do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources." 
do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases" do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload" do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change." do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart" do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed." do_test failcount "Ensure failcounts are correctly expired" do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present" do_test per-op-failcount "Ensure per-operation failcount is handled and not passed to fence agent" do_test on-fail-ignore "Ensure on-fail=ignore works even beyond migration-threshold" do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart" do_test monitor-onfail-stop "bug-5058 - Monitor failure wiht on-fail set to stop" do_test bug-5059 "No need to restart p_stateful1:*" do_test bug-5069-op-enabled "Test on-fail=ignore with failure when monitor is enabled." do_test bug-5069-op-disabled "Test on-fail-ignore with failure when monitor is disabled." do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections" do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block" do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources" do_test asymmetrical-order-restart "Respect asymmetrical ordering when restarting dependent resource" do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing" do_test order-expired-failure "Order failcount cleanup after remote fencing" do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc." do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith." do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group" do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)." do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group." do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs." 
do_test multiply-active-stonith do_test probe-timeout "cl#5099 - Default probe timeout" do_test concurrent-fencing "Allow performing fencing operations in parallel" echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progessive) #1" do_test systemhealthp2 "System Health (Progessive) #2" do_test systemhealthp3 "System Health (Progessive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" do_test placement-stickiness "Optimized Placement Strategy - stickiness" do_test placement-priority "Optimized Placement Strategy - priority" do_test placement-location "Optimized Placement Strategy - location" do_test placement-capacity "Optimized Placement Strategy - capacity" echo "" do_test utilization-order1 "Utilization Order - Simple" do_test utilization-order2 "Utilization Order - Complex" do_test utilization-order3 "Utilization Order - Migrate" do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)" do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)" do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" echo "" do_test colocated-utilization-primitive-1 "Colocated Utilization - Primitive" do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node" do_test colocated-utilization-group "Colocated Utilization - Group" do_test colocated-utilization-clone "Colocated Utilization - Clone" do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource" echo "" do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" do_test node-maintenance-1 "cl#5128 - Node maintenance" do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)" do_test shutdown-maintenance-node "Do not fence a maintenance node if it shuts down cleanly" do_test rsc-maintenance "Per-resource maintenance" echo "" do_test not-installed-agent "The resource agent is missing" do_test not-installed-tools "Something the resource agent needs is missing" echo "" do_test stopped-monitor-00 "Stopped Monitor - initial start" do_test stopped-monitor-01 "Stopped Monitor - failed started" do_test stopped-monitor-02 "Stopped Monitor - started multi-up" do_test stopped-monitor-03 "Stopped Monitor - stop started" do_test stopped-monitor-04 "Stopped Monitor - failed stop" do_test stopped-monitor-05 "Stopped Monitor - start unmanaged" do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up" do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up" do_test stopped-monitor-08 "Stopped Monitor - migrate" do_test stopped-monitor-09 "Stopped Monitor - unmanage 
started" do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up" do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started" do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" do_test stopped-monitor-20 "Stopped Monitor - initial stop" do_test stopped-monitor-21 "Stopped Monitor - stopped single-up" do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up" do_test stopped-monitor-23 "Stopped Monitor - start stopped" do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped" do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up" do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped" do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" do_test stopped-monitor-30 "Stopped Monitor - new node started" do_test stopped-monitor-31 "Stopped Monitor - new node stopped" echo "" # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on right nodes do_test intervals "Recurring monitor interval handling" echo"" do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)" do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)" do_test ticket-primitive-3 "Ticket - Primitive (loss-policy-stop, revoked)" do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)" do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)" do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)" do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)" do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)" do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)" do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)" do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)" do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)" do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)" do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)" do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)" do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)" do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)" do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)" do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)" do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)" do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)" do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)" do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)" do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)" do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)" do_test ticket-group-3 "Ticket - Group (loss-policy-stop, revoked)" do_test 
ticket-group-4 "Ticket - Group (loss-policy=demote, initial)" do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)" do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)" do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)" do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)" do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)" do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)" do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)" do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)" do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)" do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)" do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)" do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)" do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)" do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)" do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)" do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)" do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)" do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)" do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)" do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)" do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)" do_test ticket-clone-3 "Ticket - Clone (loss-policy-stop, revoked)" do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)" do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)" do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)" do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)" do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)" do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)" do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)" do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)" do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)" do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)" do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)" do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)" do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)" do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)" do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)" do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)" do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)" do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)" do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)" do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)" do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)" echo"" do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)" do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)" do_test ticket-master-3 "Ticket - Master 
(loss-policy-stop, revoked)" do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)" do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)" do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)" do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)" do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)" do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)" do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)" do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)" do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)" do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)" do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)" do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)" do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)" do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)" do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)" do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)" do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)" do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)" do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)" do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)" do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)" echo "" do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)" do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)" do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)" do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)" do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)" do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)" do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)" do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)" do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)" do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)" do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)" do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)" do_test cluster-specific-params "Cluster-specific instance attributes based on rules" do_test site-specific-params "Site-specific instance attributes based on rules" echo "" do_test template-1 "Template - 1" do_test template-2 "Template - 2" do_test template-3 "Template - 3 (merge operations)" do_test template-coloc-1 "Template - Colocation 1" do_test template-coloc-2 "Template - Colocation 2" do_test template-coloc-3 "Template - Colocation 3" do_test template-order-1 "Template - Order 1" do_test template-order-2 "Template - Order 2" do_test template-order-3 "Template - Order 3" do_test template-ticket "Template - Ticket" do_test template-rsc-sets-1 "Template - Resource Sets 1" do_test template-rsc-sets-2 "Template - Resource Sets 2" do_test template-rsc-sets-3 "Template - Resource Sets 3" do_test 
template-rsc-sets-4 "Template - Resource Sets 4" do_test template-clone-primitive "Cloned primitive from template" do_test template-clone-group "Cloned group from template" do_test location-sets-templates "Resource sets and templates - Location" do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)" do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)" do_test tags-location "Tags - Location" do_test tags-ticket "Tags - Ticket" echo "" do_test container-1 "Container - initial" do_test container-2 "Container - monitor failed" do_test container-3 "Container - stop failed" do_test container-4 "Container - reached migration-threshold" do_test container-group-1 "Container in group - initial" do_test container-group-2 "Container in group - monitor failed" do_test container-group-3 "Container in group - stop failed" do_test container-group-4 "Container in group - reached migration-threshold" do_test container-is-remote-node "Place resource within container when container is remote-node" do_test bug-rh-1097457 "Kill user defined container/contents ordering" do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container" do_test bundle-order-startup "Bundle startup ordering" do_test bundle-order-partial-start "Bundle startup ordering when some dependancies are already running" do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependancies and the container are already running" do_test bundle-order-stop "Bundle stop ordering" do_test bundle-order-partial-stop "Bundle startup ordering when some dependancies are already stopped" do_test bundle-order-stop-on-remote "Stop nested resource after bringing up the connection" do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted" do_test bundle-order-startup-clone-2 "Bundle startup with clones" do_test bundle-order-stop-clone "Stop bundle because clone is stopping" do_test bundle-nested-colocation "Colocation of nested connection resources" do_test bundle-order-fencing "Order pseudo bundle fencing after parent node fencing if both are happening" do_test bundle-probe-order-1 "order 1" do_test bundle-probe-order-2 "order 2" do_test bundle-probe-order-3 "order 3" do_test bundle-probe-remotes "Ensure remotes get probed too" do_test bundle-replicas-change "Change bundle from 1 replica to multiple" do_test nested-remote-recovery "Recover bundle's container hosted on remote node" echo "" do_test whitebox-fail1 "Fail whitebox container rsc." do_test whitebox-fail2 "Fail cluster connection to guest node" do_test whitebox-fail3 "Failed containers should not run nested on remote nodes." do_test whitebox-start "Start whitebox container with resources assigned to it" do_test whitebox-stop "Stop whitebox container with resources assigned to it" do_test whitebox-move "Move whitebox container with resources assigned to it" do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource" do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established" do_test whitebox-ms-ordering-move "Stop/Start cycle within a moving container" do_test whitebox-orphaned "Properly shutdown orphaned whitebox container" do_test whitebox-orphan-ms "Properly tear down orphan ms resources on remote-nodes" do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start." 
do_test whitebox-migrate1 "Migrate both container and connection resource" do_test whitebox-imply-stop-on-fence "Imply stop action on container node rsc when host node is fenced" do_test whitebox-nested-group "Verify guest remote-node works nested in a group" do_test guest-node-host-dies "Verify guest node is recovered if host goes away" do_test guest-node-cleanup "Order guest node connection recovery after container probe" echo "" do_test remote-startup-probes "Baremetal remote-node startup probes" do_test remote-startup "Start up a newly discovered remote-node with no status." do_test remote-fence-unclean "Fence unclean baremetal remote-node" do_test remote-fence-unclean2 "Fence baremetal remote-node after cluster node fails and connection can not be recovered" do_test remote-fence-unclean-3 "Probe failed remote nodes (triggers fencing)" do_test remote-move "Move remote-node connection resource" do_test remote-disable "Disable a baremetal remote-node" do_test remote-probe-disable "Probe then stop a baremetal remote-node" do_test remote-orphaned "Properly shut down orphaned connection resource" do_test remote-orphaned2 "Verify we can handle orphaned remote connections with active resources on the remote" do_test remote-recover "Recover connection resource after cluster-node fails." do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section" do_test remote-partial-migrate "Make sure partial migrations are handled before ops on the remote node." do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection." do_test remote-recover-fail "Make sure start failure causes fencing if rsc are active on remote." do_test remote-start-fail "Make sure a start failure does not result in fencing if no active resources are on remote." do_test remote-unclean2 "Make sure monitor failure always results in fencing, even if no rsc are active on remote." do_test remote-fence-before-reconnect "Fence before clearing recurring monitor failure" do_test remote-recovery "Recover remote connections before attempting demotion" do_test remote-recover-connection "Optimistic recovery of only the connection" do_test remote-recover-all "Fencing when the connection has no home" do_test remote-recover-no-resources "Fencing when the connection has no home and no active resources" do_test remote-recover-unknown "Fencing when the connection has no home and the remote has no operation history" do_test remote-reconnect-delay "Waiting for remote reconnect interval to expire" do_test remote-connection-unrecoverable "Remote connection host must be fenced, with connection unrecoverable" echo "" do_test resource-discovery "Exercises resource-discovery location constraint option."
do_test rsc-discovery-per-node "Disable resource discovery per node" if [ $DO_VERSIONED_TESTS -eq 1 ]; then echo "" do_test versioned-resources "Start resources with #ra-version rules" do_test restart-versioned "Restart resources on #ra-version change" do_test reload-versioned "Reload resources on #ra-version change" echo "" do_test versioned-operations-1 "Use #ra-version to configure operations of native resources" do_test versioned-operations-2 "Use #ra-version to configure operations of stonith resources" do_test versioned-operations-3 "Use #ra-version to configure operations of master/slave resources" do_test versioned-operations-4 "Use #ra-version to configure operations of groups of the resources" fi echo "" test_results exit $EXITCODE diff --git a/cts/scheduler/concurrent-fencing.exp b/cts/scheduler/concurrent-fencing.exp index f479556a84..23295e3fe5 100644 --- a/cts/scheduler/concurrent-fencing.exp +++ b/cts/scheduler/concurrent-fencing.exp @@ -1,42 +1,42 @@ - + diff --git a/cts/scheduler/dc-fence-ordering.dot b/cts/scheduler/dc-fence-ordering.dot new file mode 100644 index 0000000000..b700755a71 --- /dev/null +++ b/cts/scheduler/dc-fence-ordering.dot @@ -0,0 +1,49 @@ +digraph "g" { +"do_shutdown rhel7-2" [ style=bold color="green" fontcolor="black"] +"do_shutdown rhel7-4" [ style=bold color="green" fontcolor="black"] +"do_shutdown rhel7-5" [ style=bold color="green" fontcolor="black"] +"group-1_stop_0" -> "group-1_stopped_0" [ style = bold] +"group-1_stop_0" -> "petulant_stop_0 rhel7-1" [ style = bold] +"group-1_stop_0" -> "r192.168.122.207_stop_0 rhel7-1" [ style = bold] +"group-1_stop_0" [ style=bold color="green" fontcolor="orange"] +"group-1_stopped_0" -> "promotable-1_demote_0" [ style = bold] +"group-1_stopped_0" [ style=bold color="green" fontcolor="orange"] +"petulant_stop_0 rhel7-1" -> "group-1_stopped_0" [ style = bold] +"petulant_stop_0 rhel7-1" -> "r192.168.122.207_stop_0 rhel7-1" [ style = bold] +"petulant_stop_0 rhel7-1" [ style=bold color="green" fontcolor="orange"] +"promotable-1_demote_0" -> "promotable-1_demoted_0" [ style = bold] +"promotable-1_demote_0" -> "stateful-1_demote_0 rhel7-1" [ style = bold] +"promotable-1_demote_0" [ style=bold color="green" fontcolor="orange"] +"promotable-1_demoted_0" -> "promotable-1_stop_0" [ style = bold] +"promotable-1_demoted_0" [ style=bold color="green" fontcolor="orange"] +"promotable-1_stop_0" -> "promotable-1_stopped_0" [ style = bold] +"promotable-1_stop_0" -> "stateful-1_stop_0 rhel7-1" [ style = bold] +"promotable-1_stop_0" -> "stateful-1_stop_0 rhel7-2" [ style = bold] +"promotable-1_stop_0" -> "stateful-1_stop_0 rhel7-4" [ style = bold] +"promotable-1_stop_0" -> "stateful-1_stop_0 rhel7-5" [ style = bold] +"promotable-1_stop_0" [ style=bold color="green" fontcolor="orange"] +"promotable-1_stopped_0" [ style=bold color="green" fontcolor="orange"] +"r192.168.122.207_stop_0 rhel7-1" -> "group-1_stopped_0" [ style = bold] +"r192.168.122.207_stop_0 rhel7-1" [ style=bold color="green" fontcolor="orange"] +"stateful-1_demote_0 rhel7-1" -> "promotable-1_demoted_0" [ style = bold] +"stateful-1_demote_0 rhel7-1" -> "stateful-1_stop_0 rhel7-1" [ style = bold] +"stateful-1_demote_0 rhel7-1" [ style=bold color="green" fontcolor="orange"] +"stateful-1_stop_0 rhel7-1" -> "promotable-1_stopped_0" [ style = bold] +"stateful-1_stop_0 rhel7-1" [ style=bold color="green" fontcolor="orange"] +"stateful-1_stop_0 rhel7-2" -> "do_shutdown rhel7-2" [ style = bold] +"stateful-1_stop_0 rhel7-2" -> "promotable-1_stopped_0" [ style = bold] 
+"stateful-1_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] +"stateful-1_stop_0 rhel7-4" -> "do_shutdown rhel7-4" [ style = bold] +"stateful-1_stop_0 rhel7-4" -> "promotable-1_stopped_0" [ style = bold] +"stateful-1_stop_0 rhel7-4" [ style=bold color="green" fontcolor="black"] +"stateful-1_stop_0 rhel7-5" -> "do_shutdown rhel7-5" [ style = bold] +"stateful-1_stop_0 rhel7-5" -> "promotable-1_stopped_0" [ style = bold] +"stateful-1_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] +"stonith 'reboot' rhel7-1" -> "group-1_stop_0" [ style = bold] +"stonith 'reboot' rhel7-1" -> "petulant_stop_0 rhel7-1" [ style = bold] +"stonith 'reboot' rhel7-1" -> "promotable-1_stop_0" [ style = bold] +"stonith 'reboot' rhel7-1" -> "r192.168.122.207_stop_0 rhel7-1" [ style = bold] +"stonith 'reboot' rhel7-1" -> "stateful-1_demote_0 rhel7-1" [ style = bold] +"stonith 'reboot' rhel7-1" -> "stateful-1_stop_0 rhel7-1" [ style = bold] +"stonith 'reboot' rhel7-1" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dc-fence-ordering.exp b/cts/scheduler/dc-fence-ordering.exp new file mode 100644 index 0000000000..429db5a6d7 --- /dev/null +++ b/cts/scheduler/dc-fence-ordering.exp @@ -0,0 +1,259 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/dc-fence-ordering.scores b/cts/scheduler/dc-fence-ordering.scores new file mode 100644 index 0000000000..92cdb26642 --- /dev/null +++ b/cts/scheduler/dc-fence-ordering.scores @@ -0,0 +1,202 @@ +Allocation scores: +Using the original execution date of: 2018-11-28 18:37:16Z +clone_color: Connectivity allocation score on rhel7-1: 0 +clone_color: Connectivity allocation score on rhel7-2: 0 +clone_color: Connectivity allocation score on rhel7-3: 0 +clone_color: Connectivity allocation score on rhel7-4: 0 +clone_color: Connectivity allocation score on rhel7-5: 0 +clone_color: ping-1:0 allocation score on rhel7-1: 0 +clone_color: ping-1:0 allocation score on rhel7-2: 0 +clone_color: ping-1:0 allocation score on rhel7-3: 0 +clone_color: ping-1:0 allocation score on rhel7-4: 0 +clone_color: ping-1:0 allocation score on rhel7-5: 0 +clone_color: ping-1:1 allocation score on rhel7-1: 0 +clone_color: ping-1:1 allocation score on rhel7-2: 0 +clone_color: ping-1:1 allocation score on rhel7-3: 0 +clone_color: ping-1:1 allocation score on rhel7-4: 0 +clone_color: ping-1:1 allocation score on rhel7-5: 0 +clone_color: ping-1:2 allocation score on rhel7-1: 0 +clone_color: ping-1:2 allocation score on rhel7-2: 0 +clone_color: ping-1:2 allocation score on rhel7-3: 0 +clone_color: ping-1:2 allocation score on rhel7-4: 0 +clone_color: ping-1:2 allocation score on rhel7-5: 0 +clone_color: ping-1:3 allocation score on rhel7-1: 0 +clone_color: ping-1:3 allocation score on rhel7-2: 0 +clone_color: ping-1:3 allocation score on rhel7-3: 0 +clone_color: ping-1:3 allocation score on rhel7-4: 0 +clone_color: ping-1:3 allocation score on rhel7-5: 0 +clone_color: ping-1:4 allocation score on rhel7-1: 0 +clone_color: ping-1:4 allocation score on rhel7-2: 0 +clone_color: 
ping-1:4 allocation score on rhel7-3: 0 +clone_color: ping-1:4 allocation score on rhel7-4: 0 +clone_color: ping-1:4 allocation score on rhel7-5: 0 +clone_color: promotable-1 allocation score on rhel7-1: -INFINITY +clone_color: promotable-1 allocation score on rhel7-2: -INFINITY +clone_color: promotable-1 allocation score on rhel7-3: -INFINITY +clone_color: promotable-1 allocation score on rhel7-4: -INFINITY +clone_color: promotable-1 allocation score on rhel7-5: -INFINITY +clone_color: stateful-1:0 allocation score on rhel7-1: -INFINITY +clone_color: stateful-1:0 allocation score on rhel7-2: -INFINITY +clone_color: stateful-1:0 allocation score on rhel7-3: -INFINITY +clone_color: stateful-1:0 allocation score on rhel7-4: -INFINITY +clone_color: stateful-1:0 allocation score on rhel7-5: -INFINITY +clone_color: stateful-1:1 allocation score on rhel7-1: -INFINITY +clone_color: stateful-1:1 allocation score on rhel7-2: -INFINITY +clone_color: stateful-1:1 allocation score on rhel7-3: -INFINITY +clone_color: stateful-1:1 allocation score on rhel7-4: -INFINITY +clone_color: stateful-1:1 allocation score on rhel7-5: -INFINITY +clone_color: stateful-1:2 allocation score on rhel7-1: -INFINITY +clone_color: stateful-1:2 allocation score on rhel7-2: -INFINITY +clone_color: stateful-1:2 allocation score on rhel7-3: -INFINITY +clone_color: stateful-1:2 allocation score on rhel7-4: -INFINITY +clone_color: stateful-1:2 allocation score on rhel7-5: -INFINITY +clone_color: stateful-1:3 allocation score on rhel7-1: -INFINITY +clone_color: stateful-1:3 allocation score on rhel7-2: -INFINITY +clone_color: stateful-1:3 allocation score on rhel7-3: -INFINITY +clone_color: stateful-1:3 allocation score on rhel7-4: -INFINITY +clone_color: stateful-1:3 allocation score on rhel7-5: -INFINITY +clone_color: stateful-1:4 allocation score on rhel7-1: -INFINITY +clone_color: stateful-1:4 allocation score on rhel7-2: -INFINITY +clone_color: stateful-1:4 allocation score on rhel7-3: -INFINITY +clone_color: stateful-1:4 allocation score on rhel7-4: -INFINITY +clone_color: stateful-1:4 allocation score on rhel7-5: -INFINITY +group_color: group-1 allocation score on rhel7-1: 0 +group_color: group-1 allocation score on rhel7-2: 0 +group_color: group-1 allocation score on rhel7-3: 0 +group_color: group-1 allocation score on rhel7-4: 0 +group_color: group-1 allocation score on rhel7-5: 0 +group_color: petulant allocation score on rhel7-1: -INFINITY +group_color: petulant allocation score on rhel7-2: 0 +group_color: petulant allocation score on rhel7-3: 0 +group_color: petulant allocation score on rhel7-4: 0 +group_color: petulant allocation score on rhel7-5: 0 +group_color: r192.168.122.207 allocation score on rhel7-1: 0 +group_color: r192.168.122.207 allocation score on rhel7-2: 0 +group_color: r192.168.122.207 allocation score on rhel7-3: 0 +group_color: r192.168.122.207 allocation score on rhel7-4: 0 +group_color: r192.168.122.207 allocation score on rhel7-5: 0 +group_color: r192.168.122.208 allocation score on rhel7-1: 0 +group_color: r192.168.122.208 allocation score on rhel7-2: 0 +group_color: r192.168.122.208 allocation score on rhel7-3: 0 +group_color: r192.168.122.208 allocation score on rhel7-4: 0 +group_color: r192.168.122.208 allocation score on rhel7-5: 0 +native_color: Fencing allocation score on rhel7-1: 0 +native_color: Fencing allocation score on rhel7-2: 0 +native_color: Fencing allocation score on rhel7-3: 0 +native_color: Fencing allocation score on rhel7-4: 0 +native_color: Fencing allocation score on 
rhel7-5: 0 +native_color: FencingFail allocation score on rhel7-1: 0 +native_color: FencingFail allocation score on rhel7-2: 0 +native_color: FencingFail allocation score on rhel7-3: 0 +native_color: FencingFail allocation score on rhel7-4: 0 +native_color: FencingFail allocation score on rhel7-5: 0 +native_color: FencingPass allocation score on rhel7-1: 0 +native_color: FencingPass allocation score on rhel7-2: 0 +native_color: FencingPass allocation score on rhel7-3: 0 +native_color: FencingPass allocation score on rhel7-4: 0 +native_color: FencingPass allocation score on rhel7-5: 0 +native_color: lsb-dummy allocation score on rhel7-1: -INFINITY +native_color: lsb-dummy allocation score on rhel7-2: -INFINITY +native_color: lsb-dummy allocation score on rhel7-3: -INFINITY +native_color: lsb-dummy allocation score on rhel7-4: -INFINITY +native_color: lsb-dummy allocation score on rhel7-5: -INFINITY +native_color: migrator allocation score on rhel7-1: 0 +native_color: migrator allocation score on rhel7-2: 0 +native_color: migrator allocation score on rhel7-3: 0 +native_color: migrator allocation score on rhel7-4: 0 +native_color: migrator allocation score on rhel7-5: 0 +native_color: petulant allocation score on rhel7-1: -INFINITY +native_color: petulant allocation score on rhel7-2: -INFINITY +native_color: petulant allocation score on rhel7-3: -INFINITY +native_color: petulant allocation score on rhel7-4: -INFINITY +native_color: petulant allocation score on rhel7-5: -INFINITY +native_color: ping-1:0 allocation score on rhel7-1: -INFINITY +native_color: ping-1:0 allocation score on rhel7-2: -INFINITY +native_color: ping-1:0 allocation score on rhel7-3: -INFINITY +native_color: ping-1:0 allocation score on rhel7-4: -INFINITY +native_color: ping-1:0 allocation score on rhel7-5: -INFINITY +native_color: ping-1:1 allocation score on rhel7-1: -INFINITY +native_color: ping-1:1 allocation score on rhel7-2: -INFINITY +native_color: ping-1:1 allocation score on rhel7-3: -INFINITY +native_color: ping-1:1 allocation score on rhel7-4: -INFINITY +native_color: ping-1:1 allocation score on rhel7-5: -INFINITY +native_color: ping-1:2 allocation score on rhel7-1: -INFINITY +native_color: ping-1:2 allocation score on rhel7-2: -INFINITY +native_color: ping-1:2 allocation score on rhel7-3: -INFINITY +native_color: ping-1:2 allocation score on rhel7-4: -INFINITY +native_color: ping-1:2 allocation score on rhel7-5: -INFINITY +native_color: ping-1:3 allocation score on rhel7-1: -INFINITY +native_color: ping-1:3 allocation score on rhel7-2: -INFINITY +native_color: ping-1:3 allocation score on rhel7-3: -INFINITY +native_color: ping-1:3 allocation score on rhel7-4: -INFINITY +native_color: ping-1:3 allocation score on rhel7-5: -INFINITY +native_color: ping-1:4 allocation score on rhel7-1: -INFINITY +native_color: ping-1:4 allocation score on rhel7-2: -INFINITY +native_color: ping-1:4 allocation score on rhel7-3: -INFINITY +native_color: ping-1:4 allocation score on rhel7-4: -INFINITY +native_color: ping-1:4 allocation score on rhel7-5: -INFINITY +native_color: r192.168.122.207 allocation score on rhel7-1: -INFINITY +native_color: r192.168.122.207 allocation score on rhel7-2: -INFINITY +native_color: r192.168.122.207 allocation score on rhel7-3: -INFINITY +native_color: r192.168.122.207 allocation score on rhel7-4: -INFINITY +native_color: r192.168.122.207 allocation score on rhel7-5: -INFINITY +native_color: r192.168.122.208 allocation score on rhel7-1: -INFINITY +native_color: r192.168.122.208 allocation score on 
rhel7-2: -INFINITY +native_color: r192.168.122.208 allocation score on rhel7-3: -INFINITY +native_color: r192.168.122.208 allocation score on rhel7-4: -INFINITY +native_color: r192.168.122.208 allocation score on rhel7-5: -INFINITY +native_color: rsc_rhel7-1 allocation score on rhel7-1: 100 +native_color: rsc_rhel7-1 allocation score on rhel7-2: 0 +native_color: rsc_rhel7-1 allocation score on rhel7-3: 0 +native_color: rsc_rhel7-1 allocation score on rhel7-4: 0 +native_color: rsc_rhel7-1 allocation score on rhel7-5: 0 +native_color: rsc_rhel7-2 allocation score on rhel7-1: 0 +native_color: rsc_rhel7-2 allocation score on rhel7-2: 100 +native_color: rsc_rhel7-2 allocation score on rhel7-3: 0 +native_color: rsc_rhel7-2 allocation score on rhel7-4: 0 +native_color: rsc_rhel7-2 allocation score on rhel7-5: 0 +native_color: rsc_rhel7-3 allocation score on rhel7-1: 0 +native_color: rsc_rhel7-3 allocation score on rhel7-2: 0 +native_color: rsc_rhel7-3 allocation score on rhel7-3: 100 +native_color: rsc_rhel7-3 allocation score on rhel7-4: 0 +native_color: rsc_rhel7-3 allocation score on rhel7-5: 0 +native_color: rsc_rhel7-4 allocation score on rhel7-1: 0 +native_color: rsc_rhel7-4 allocation score on rhel7-2: 0 +native_color: rsc_rhel7-4 allocation score on rhel7-3: 0 +native_color: rsc_rhel7-4 allocation score on rhel7-4: 100 +native_color: rsc_rhel7-4 allocation score on rhel7-5: 0 +native_color: rsc_rhel7-5 allocation score on rhel7-1: 0 +native_color: rsc_rhel7-5 allocation score on rhel7-2: 0 +native_color: rsc_rhel7-5 allocation score on rhel7-3: 0 +native_color: rsc_rhel7-5 allocation score on rhel7-4: 0 +native_color: rsc_rhel7-5 allocation score on rhel7-5: 100 +native_color: stateful-1:0 allocation score on rhel7-1: -INFINITY +native_color: stateful-1:0 allocation score on rhel7-2: -INFINITY +native_color: stateful-1:0 allocation score on rhel7-3: -INFINITY +native_color: stateful-1:0 allocation score on rhel7-4: -INFINITY +native_color: stateful-1:0 allocation score on rhel7-5: -INFINITY +native_color: stateful-1:1 allocation score on rhel7-1: -INFINITY +native_color: stateful-1:1 allocation score on rhel7-2: -INFINITY +native_color: stateful-1:1 allocation score on rhel7-3: -INFINITY +native_color: stateful-1:1 allocation score on rhel7-4: -INFINITY +native_color: stateful-1:1 allocation score on rhel7-5: -INFINITY +native_color: stateful-1:2 allocation score on rhel7-1: -INFINITY +native_color: stateful-1:2 allocation score on rhel7-2: -INFINITY +native_color: stateful-1:2 allocation score on rhel7-3: -INFINITY +native_color: stateful-1:2 allocation score on rhel7-4: -INFINITY +native_color: stateful-1:2 allocation score on rhel7-5: -INFINITY +native_color: stateful-1:3 allocation score on rhel7-1: -INFINITY +native_color: stateful-1:3 allocation score on rhel7-2: -INFINITY +native_color: stateful-1:3 allocation score on rhel7-3: -INFINITY +native_color: stateful-1:3 allocation score on rhel7-4: -INFINITY +native_color: stateful-1:3 allocation score on rhel7-5: -INFINITY +native_color: stateful-1:4 allocation score on rhel7-1: -INFINITY +native_color: stateful-1:4 allocation score on rhel7-2: -INFINITY +native_color: stateful-1:4 allocation score on rhel7-3: -INFINITY +native_color: stateful-1:4 allocation score on rhel7-4: -INFINITY +native_color: stateful-1:4 allocation score on rhel7-5: -INFINITY +stateful-1:0 promotion score on none: 0 +stateful-1:1 promotion score on none: 0 +stateful-1:2 promotion score on none: 0 +stateful-1:3 promotion score on none: 0 +stateful-1:4 promotion 
score on none: 0 diff --git a/cts/scheduler/dc-fence-ordering.summary b/cts/scheduler/dc-fence-ordering.summary new file mode 100644 index 0000000000..82e50960dc --- /dev/null +++ b/cts/scheduler/dc-fence-ordering.summary @@ -0,0 +1,83 @@ +Using the original execution date of: 2018-11-28 18:37:16Z + +Current cluster status: +Node rhel7-1 (1): UNCLEAN (online) +Online: [ rhel7-2 rhel7-4 rhel7-5 ] +OFFLINE: [ rhel7-3 ] + + Fencing (stonith:fence_xvm): Stopped + FencingPass (stonith:fence_dummy): Stopped + FencingFail (stonith:fence_dummy): Stopped + rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-2 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-3 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-4 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-5 (ocf::heartbeat:IPaddr2): Stopped + migrator (ocf::pacemaker:Dummy): Stopped + Clone Set: Connectivity [ping-1] + Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] + Clone Set: promotable-1 [stateful-1] (promotable) + Masters: [ rhel7-1 ] + Slaves: [ rhel7-2 rhel7-4 rhel7-5 ] + Stopped: [ rhel7-3 ] + Resource Group: group-1 + r192.168.122.207 (ocf::heartbeat:IPaddr2): Started rhel7-1 + petulant (service:pacemaker-cts-dummyd@10): FAILED rhel7-1 + r192.168.122.208 (ocf::heartbeat:IPaddr2): Stopped + lsb-dummy (lsb:LSBDummy): Stopped + +Transition Summary: + * Shutdown rhel7-5 + * Shutdown rhel7-4 + * Shutdown rhel7-2 + * Fence (reboot) rhel7-1 'petulant failed there' + * Stop stateful-1:0 ( Slave rhel7-5 ) due to node availability + * Stop stateful-1:1 ( Master rhel7-1 ) due to node availability + * Stop stateful-1:2 ( Slave rhel7-2 ) due to node availability + * Stop stateful-1:3 ( Slave rhel7-4 ) due to node availability + * Stop r192.168.122.207 ( rhel7-1 ) due to node availability + * Stop petulant ( rhel7-1 ) due to node availability + +Executing cluster transition: + * Fencing rhel7-1 (reboot) + * Pseudo action: group-1_stop_0 + * Pseudo action: petulant_stop_0 + * Pseudo action: r192.168.122.207_stop_0 + * Pseudo action: group-1_stopped_0 + * Pseudo action: promotable-1_demote_0 + * Pseudo action: stateful-1_demote_0 + * Pseudo action: promotable-1_demoted_0 + * Pseudo action: promotable-1_stop_0 + * Resource action: stateful-1 stop on rhel7-5 + * Pseudo action: stateful-1_stop_0 + * Resource action: stateful-1 stop on rhel7-2 + * Resource action: stateful-1 stop on rhel7-4 + * Pseudo action: promotable-1_stopped_0 + * Cluster action: do_shutdown on rhel7-5 + * Cluster action: do_shutdown on rhel7-4 + * Cluster action: do_shutdown on rhel7-2 +Using the original execution date of: 2018-11-28 18:37:16Z + +Revised cluster status: +Online: [ rhel7-2 rhel7-4 rhel7-5 ] +OFFLINE: [ rhel7-1 rhel7-3 ] + + Fencing (stonith:fence_xvm): Stopped + FencingPass (stonith:fence_dummy): Stopped + FencingFail (stonith:fence_dummy): Stopped + rsc_rhel7-1 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-2 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-3 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-4 (ocf::heartbeat:IPaddr2): Stopped + rsc_rhel7-5 (ocf::heartbeat:IPaddr2): Stopped + migrator (ocf::pacemaker:Dummy): Stopped + Clone Set: Connectivity [ping-1] + Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] + Clone Set: promotable-1 [stateful-1] (promotable) + Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] + Resource Group: group-1 + r192.168.122.207 (ocf::heartbeat:IPaddr2): Stopped + petulant (service:pacemaker-cts-dummyd@10): Stopped + r192.168.122.208 (ocf::heartbeat:IPaddr2): Stopped + lsb-dummy (lsb:LSBDummy): Stopped + diff --git 
a/cts/scheduler/dc-fence-ordering.xml b/cts/scheduler/dc-fence-ordering.xml new file mode 100644 index 0000000000..3ce32045fe --- /dev/null +++ b/cts/scheduler/dc-fence-ordering.xml @@ -0,0 +1,551 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/migrate-fencing.exp b/cts/scheduler/migrate-fencing.exp index 0a5103a1c0..84596e71b5 100644 --- a/cts/scheduler/migrate-fencing.exp +++ b/cts/scheduler/migrate-fencing.exp @@ -1,634 +1,634 @@ - + diff --git a/cts/scheduler/per-op-failcount.exp b/cts/scheduler/per-op-failcount.exp index d86e486254..2a4eec5204 100644 --- a/cts/scheduler/per-op-failcount.exp +++ b/cts/scheduler/per-op-failcount.exp @@ -1,75 +1,75 @@ - + diff --git a/cts/scheduler/rec-node-13.exp b/cts/scheduler/rec-node-13.exp index 8c5593b95f..782475d1b2 100644 --- a/cts/scheduler/rec-node-13.exp +++ b/cts/scheduler/rec-node-13.exp @@ -1,55 +1,55 @@ - + diff --git a/cts/scheduler/rec-node-14.exp b/cts/scheduler/rec-node-14.exp index e95918f236..5520755924 100644 --- a/cts/scheduler/rec-node-14.exp +++ b/cts/scheduler/rec-node-14.exp @@ -1,43 +1,43 @@ - + diff --git a/cts/scheduler/rec-node-15.exp b/cts/scheduler/rec-node-15.exp index 1207b6412a..c3ee5b750b 100644 --- a/cts/scheduler/rec-node-15.exp +++ b/cts/scheduler/rec-node-15.exp @@ -1,451 +1,451 @@ - + diff --git a/cts/scheduler/stonith-0.exp b/cts/scheduler/stonith-0.exp index 92853d85a5..6d286d39d1 100644 --- a/cts/scheduler/stonith-0.exp +++ b/cts/scheduler/stonith-0.exp @@ -1,411 +1,411 @@ - + diff --git a/cts/scheduler/suicide-needed-inquorate.exp b/cts/scheduler/suicide-needed-inquorate.exp index 58619aab52..bd0b69cc16 100644 --- a/cts/scheduler/suicide-needed-inquorate.exp +++ b/cts/scheduler/suicide-needed-inquorate.exp @@ -1,43 +1,43 @@ - + diff --git a/cts/scheduler/systemhealth1.exp b/cts/scheduler/systemhealth1.exp index fb8764c4e8..ac8e6ada22 100644 --- a/cts/scheduler/systemhealth1.exp +++ b/cts/scheduler/systemhealth1.exp @@ -1,29 +1,29 @@ - + diff --git a/cts/scheduler/systemhealthm1.exp b/cts/scheduler/systemhealthm1.exp index fb8764c4e8..ac8e6ada22 100644 --- a/cts/scheduler/systemhealthm1.exp +++ b/cts/scheduler/systemhealthm1.exp @@ -1,29 +1,29 @@ - + diff --git a/cts/scheduler/systemhealthn1.exp b/cts/scheduler/systemhealthn1.exp index fb8764c4e8..ac8e6ada22 100644 --- a/cts/scheduler/systemhealthn1.exp +++ b/cts/scheduler/systemhealthn1.exp @@ -1,29 +1,29 @@ - + diff --git a/cts/scheduler/systemhealtho1.exp b/cts/scheduler/systemhealtho1.exp index 
fb8764c4e8..ac8e6ada22 100644 --- a/cts/scheduler/systemhealtho1.exp +++ b/cts/scheduler/systemhealtho1.exp @@ -1,29 +1,29 @@ - + diff --git a/cts/scheduler/systemhealthp1.exp b/cts/scheduler/systemhealthp1.exp index fb8764c4e8..ac8e6ada22 100644 --- a/cts/scheduler/systemhealthp1.exp +++ b/cts/scheduler/systemhealthp1.exp @@ -1,29 +1,29 @@ - + diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c index 76e6ca4b20..90e5622849 100644 --- a/daemons/execd/cts-exec-helper.c +++ b/daemons/execd/cts-exec-helper.c @@ -1,619 +1,620 @@ /* - * Copyright 2012-2018 David Vossel + * Copyright 2012-2019 David Vossel * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include /* *INDENT-OFF* */ static struct crm_option long_options[] = { {"help", 0, 0, '?'}, {"verbose", 0, 0, 'V', "\t\tPrint out logs and events to screen"}, {"quiet", 0, 0, 'Q', "\t\tSuppress all output to screen"}, {"tls", 0, 0, 'S', "\t\tUse tls backend for local connection"}, {"listen", 1, 0, 'l', "\tListen for a specific event string"}, {"api-call", 1, 0, 'c', "\tDirectly relates to executor API functions"}, {"no-wait", 0, 0, 'w', "\tMake api call and do not wait for result."}, {"is-running", 0, 0, 'R', "\tDetermine if a resource is registered and running."}, {"notify-orig", 0, 0, 'n', "\tOnly notify this client the results of an api action."}, {"notify-changes", 0, 0, 'o', "\tOnly notify client changes to recurring operations."}, {"-spacer-", 1, 0, '-', "\nParameters for api-call option"}, {"action", 1, 0, 'a'}, {"rsc-id", 1, 0, 'r'}, {"cancel-call-id", 1, 0, 'x'}, {"provider", 1, 0, 'P'}, {"class", 1, 0, 'C'}, {"type", 1, 0, 'T'}, {"interval", 1, 0, 'i'}, {"timeout", 1, 0, 't'}, {"start-delay", 1, 0, 's'}, {"param-key", 1, 0, 'k'}, {"param-val", 1, 0, 'v'}, {"-spacer-", 1, 0, '-'}, {0, 0, 0, 0} }; /* *INDENT-ON* */ static cib_t *cib_conn = NULL; static int exec_call_id = 0; static int exec_call_opts = 0; static gboolean start_test(gpointer user_data); static void try_connect(void); static struct { int verbose; int quiet; guint interval_ms; int timeout; int start_delay; int cancel_call_id; int no_wait; int is_running; int no_connect; const char *api_call; const char *rsc_id; const char *provider; const char *class; const char *type; const char *action; const char *listen; lrmd_key_value_t *params; } options; static GMainLoop *mainloop = NULL; static lrmd_t *lrmd_conn = NULL; static char event_buf_v0[1024]; static void test_exit(crm_exit_t exit_code) { lrmd_api_delete(lrmd_conn); crm_exit(exit_code); } #define print_result(result) \ if (!options.quiet) { \ result; \ } \ #define report_event(event) \ snprintf(event_buf_v0, sizeof(event_buf_v0), "NEW_EVENT event_type:%s rsc_id:%s action:%s rc:%s op_status:%s", \ lrmd_event_type2str(event->type), \ event->rsc_id, \ event->op_type ? 
event->op_type : "none", \ services_ocf_exitcode_str(event->rc), \ services_lrm_status_str(event->op_status)); \ crm_info("%s", event_buf_v0); static void test_shutdown(int nsig) { lrmd_api_delete(lrmd_conn); lrmd_conn = NULL; } static void read_events(lrmd_event_data_t * event) { report_event(event); if (options.listen) { if (safe_str_eq(options.listen, event_buf_v0)) { print_result(printf("LISTEN EVENT SUCCESSFUL\n")); test_exit(CRM_EX_OK); } } if (exec_call_id && (event->call_id == exec_call_id)) { if (event->op_status == 0 && event->rc == 0) { print_result(printf("API-CALL SUCCESSFUL for 'exec'\n")); } else { print_result(printf("API-CALL FAILURE for 'exec', rc:%d lrmd_op_status:%s\n", event->rc, services_lrm_status_str(event->op_status))); test_exit(CRM_EX_ERROR); } if (!options.listen) { test_exit(CRM_EX_OK); } } } static gboolean timeout_err(gpointer data) { print_result(printf("LISTEN EVENT FAILURE - timeout occurred, never found.\n")); test_exit(CRM_EX_TIMEOUT); return FALSE; } static void connection_events(lrmd_event_data_t * event) { int rc = event->connection_rc; if (event->type != lrmd_event_connect) { /* ignore */ return; } if (!rc) { crm_info("Executor client connection established"); start_test(NULL); return; } else { sleep(1); try_connect(); crm_notice("Executor client connection failed"); } } static void try_connect(void) { int tries = 10; static int num_tries = 0; int rc = 0; lrmd_conn->cmds->set_callback(lrmd_conn, connection_events); for (; num_tries < tries; num_tries++) { rc = lrmd_conn->cmds->connect_async(lrmd_conn, "pacemaker-execd", 3000); if (!rc) { return; /* we'll hear back in async callback */ } sleep(1); } print_result(printf("API CONNECTION FAILURE\n")); test_exit(CRM_EX_ERROR); } static gboolean start_test(gpointer user_data) { int rc = 0; if (!options.no_connect) { if (!lrmd_conn->cmds->is_connected(lrmd_conn)) { try_connect(); /* async connect -- this function will get called back into */ return 0; } } lrmd_conn->cmds->set_callback(lrmd_conn, read_events); if (options.timeout) { g_timeout_add(options.timeout, timeout_err, NULL); } if (!options.api_call) { return 0; } if (safe_str_eq(options.api_call, "exec")) { rc = lrmd_conn->cmds->exec(lrmd_conn, options.rsc_id, options.action, NULL, options.interval_ms, options.timeout, options.start_delay, exec_call_opts, options.params); if (rc > 0) { exec_call_id = rc; print_result(printf("API-CALL 'exec' action pending, waiting on response\n")); } } else if (safe_str_eq(options.api_call, "register_rsc")) { rc = lrmd_conn->cmds->register_rsc(lrmd_conn, options.rsc_id, options.class, options.provider, options.type, 0); } else if (safe_str_eq(options.api_call, "get_rsc_info")) { lrmd_rsc_info_t *rsc_info; rsc_info = lrmd_conn->cmds->get_rsc_info(lrmd_conn, options.rsc_id, 0); if (rsc_info) { print_result(printf("RSC_INFO: id:%s class:%s provider:%s type:%s\n", rsc_info->id, rsc_info->standard, rsc_info->provider ? 
rsc_info->provider : "", rsc_info->type)); lrmd_free_rsc_info(rsc_info); rc = pcmk_ok; } else { rc = -1; } } else if (safe_str_eq(options.api_call, "unregister_rsc")) { rc = lrmd_conn->cmds->unregister_rsc(lrmd_conn, options.rsc_id, 0); } else if (safe_str_eq(options.api_call, "cancel")) { rc = lrmd_conn->cmds->cancel(lrmd_conn, options.rsc_id, options.action, options.interval_ms); } else if (safe_str_eq(options.api_call, "metadata")) { char *output = NULL; rc = lrmd_conn->cmds->get_metadata(lrmd_conn, options.class, options.provider, options.type, &output, 0); if (rc == pcmk_ok) { print_result(printf("%s", output)); free(output); } } else if (safe_str_eq(options.api_call, "list_agents")) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, options.class, options.provider); if (rc > 0) { print_result(printf("%d agents found\n", rc)); for (iter = list; iter != NULL; iter = iter->next) { print_result(printf("%s\n", iter->val)); } lrmd_list_freeall(list); rc = 0; } else { print_result(printf("API_CALL FAILURE - no agents found\n")); rc = -1; } } else if (safe_str_eq(options.api_call, "list_ocf_providers")) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, options.type, &list); if (rc > 0) { print_result(printf("%d providers found\n", rc)); for (iter = list; iter != NULL; iter = iter->next) { print_result(printf("%s\n", iter->val)); } lrmd_list_freeall(list); rc = 0; } else { print_result(printf("API_CALL FAILURE - no providers found\n")); rc = -1; } } else if (safe_str_eq(options.api_call, "list_standards")) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); if (rc > 0) { print_result(printf("%d standards found\n", rc)); for (iter = list; iter != NULL; iter = iter->next) { print_result(printf("%s\n", iter->val)); } lrmd_list_freeall(list); rc = 0; } else { print_result(printf("API_CALL FAILURE - no providers found\n")); rc = -1; } } else if (safe_str_eq(options.api_call, "get_recurring_ops")) { GList *op_list = NULL; GList *op_item = NULL; rc = lrmd_conn->cmds->get_recurring_ops(lrmd_conn, options.rsc_id, 0, 0, &op_list); for (op_item = op_list; op_item != NULL; op_item = op_item->next) { lrmd_op_info_t *op_info = op_item->data; print_result(printf("RECURRING_OP: %s_%s_%s timeout=%sms\n", op_info->rsc_id, op_info->action, op_info->interval_ms_s, op_info->timeout_ms_s)); lrmd_free_op_info(op_info); } g_list_free(op_list); } else if (options.api_call) { print_result(printf("API-CALL FAILURE unknown action '%s'\n", options.action)); test_exit(CRM_EX_ERROR); } if (rc < 0) { print_result(printf("API-CALL FAILURE for '%s' api_rc:%d\n", options.api_call, rc)); test_exit(CRM_EX_ERROR); } if (options.api_call && rc == pcmk_ok) { print_result(printf("API-CALL SUCCESSFUL for '%s'\n", options.api_call)); if (!options.listen) { test_exit(CRM_EX_OK); } } if (options.no_wait) { /* just make the call and exit regardless of anything else. 
*/ test_exit(CRM_EX_OK); } return 0; } static int generate_params(void) { int rc = 0; pe_working_set_t *data_set = NULL; xmlNode *cib_xml_copy = NULL; resource_t *rsc = NULL; GHashTable *params = NULL; GHashTable *meta = NULL; GHashTableIter iter; if (options.params) { return 0; } data_set = pe_new_working_set(); if (data_set == NULL) { crm_crit("Could not allocate working set"); return -ENOMEM; } cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, "cts-exec-helper", cib_query); if (rc != pcmk_ok) { crm_err("Could not connect to the CIB manager: %s", pcmk_strerror(rc)); rc = -1; goto param_gen_bail; } rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { crm_err("Error retrieving cib copy: %s (%d)", pcmk_strerror(rc), rc); goto param_gen_bail; } else if (cib_xml_copy == NULL) { rc = -ENODATA; crm_err("Error retrieving cib copy: %s (%d)", pcmk_strerror(rc), rc); goto param_gen_bail; } if (cli_config_update(&cib_xml_copy, NULL, FALSE) == FALSE) { crm_err("Error updating cib configuration"); rc = -1; goto param_gen_bail; } data_set->input = cib_xml_copy; data_set->now = crm_time_new(NULL); cluster_status(data_set); if (options.rsc_id) { rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id, pe_find_renamed|pe_find_any); } if (!rsc) { crm_err("Resource does not exist in config"); rc = -1; goto param_gen_bail; } params = crm_str_table_new(); meta = crm_str_table_new(); get_rsc_attributes(params, rsc, NULL, data_set); get_meta_attributes(meta, rsc, NULL, data_set); if (params) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { options.params = lrmd_key_value_add(options.params, key, value); } g_hash_table_destroy(params); } if (meta) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); options.params = lrmd_key_value_add(options.params, crm_name, value); free(crm_name); } g_hash_table_destroy(meta); } param_gen_bail: pe_free_working_set(data_set); return rc; } int main(int argc, char **argv) { int option_index = 0; int argerr = 0; int flag; char *key = NULL; char *val = NULL; gboolean use_tls = FALSE; crm_trigger_t *trig; crm_log_cli_init("cts-exec-helper"); crm_set_options(NULL, "mode [options]", long_options, "Inject commands into the executor, and watch for events\n"); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case '?': crm_help(flag, CRM_EX_OK); break; case 'V': - options.verbose = 1; + ++options.verbose; + crm_bump_log_level(argc, argv); break; case 'Q': options.quiet = 1; options.verbose = 0; break; case 'l': options.listen = optarg; break; case 'w': options.no_wait = 1; break; case 'R': options.is_running = 1; break; case 'n': exec_call_opts = lrmd_opt_notify_orig_only; break; case 'o': exec_call_opts = lrmd_opt_notify_changes_only; break; case 'c': options.api_call = optarg; break; case 'a': options.action = optarg; break; case 'r': options.rsc_id = optarg; break; case 'x': if(optarg) { options.cancel_call_id = atoi(optarg); } break; case 'P': options.provider = optarg; break; case 'C': options.class = optarg; break; case 'T': options.type = optarg; break; case 'i': if(optarg) { options.interval_ms = crm_parse_interval_spec(optarg); } break; case 't': if(optarg) { options.timeout = atoi(optarg); } 
break; case 's': if(optarg) { options.start_delay = atoi(optarg); } break; case 'k': key = optarg; if (key && val) { options.params = lrmd_key_value_add(options.params, key, val); key = val = NULL; } break; case 'v': val = optarg; if (key && val) { options.params = lrmd_key_value_add(options.params, key, val); key = val = NULL; } break; case 'S': use_tls = TRUE; break; default: ++argerr; break; } } if (argerr) { crm_help('?', CRM_EX_USAGE); } if (optind > argc) { ++argerr; } if (!options.listen && (safe_str_eq(options.api_call, "metadata") || safe_str_eq(options.api_call, "list_agents") || safe_str_eq(options.api_call, "list_standards") || safe_str_eq(options.api_call, "list_ocf_providers"))) { options.no_connect = 1; } crm_log_init(NULL, LOG_INFO, TRUE, (options.verbose? TRUE : FALSE), argc, argv, FALSE); if (options.is_running) { if (!options.timeout) { options.timeout = 30000; } options.interval_ms = 0; if (!options.rsc_id) { crm_err("rsc-id must be given when is-running is used"); test_exit(CRM_EX_ERROR); } if (generate_params()) { print_result(printf ("Failed to retrieve rsc parameters from cib, can not determine if rsc is running.\n")); test_exit(CRM_EX_ERROR); } options.api_call = "exec"; options.action = "monitor"; exec_call_opts = lrmd_opt_notify_orig_only; } /* if we can't perform an api_call or listen for events, * there is nothing to do */ if (!options.api_call && !options.listen) { crm_err("Nothing to be done. Please specify 'api-call' and/or 'listen'"); return CRM_EX_OK; } if (use_tls) { lrmd_conn = lrmd_remote_api_new(NULL, "localhost", 0); } else { lrmd_conn = lrmd_api_new(); } trig = mainloop_add_trigger(G_PRIORITY_HIGH, start_test, NULL); mainloop_set_trigger(trig); mainloop_add_signal(SIGTERM, test_shutdown); crm_info("Starting"); mainloop = g_main_loop_new(NULL, FALSE); g_main_loop_run(mainloop); if (cib_conn != NULL) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } test_exit(CRM_EX_OK); return CRM_EX_OK; } diff --git a/daemons/schedulerd/sched_allocate.c b/daemons/schedulerd/sched_allocate.c index 7d627300d7..912a38a7c0 100644 --- a/daemons/schedulerd/sched_allocate.c +++ b/daemons/schedulerd/sched_allocate.c @@ -1,2573 +1,2577 @@ /* - * Copyright 2004-2018 Andrew Beekhof + * Copyright 2004-2019 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_allocate); void set_alloc_actions(pe_working_set_t * data_set); extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set); extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set); static void apply_remote_node_ordering(pe_working_set_t *data_set); static enum remote_connection_state get_remote_node_state(pe_node_t *node); enum remote_connection_state { remote_state_unknown = 0, remote_state_alive = 1, remote_state_resting = 2, remote_state_failed = 3, remote_state_stopped = 4 }; static const char * state2text(enum remote_connection_state state) { switch (state) { case remote_state_unknown: return "unknown"; case remote_state_alive: return "alive"; case remote_state_resting: return "resting"; case remote_state_failed: return "failed"; case remote_state_stopped: return "stopped"; } return "impossible"; } resource_alloc_functions_t resource_class_alloc_functions[] = { { native_merge_weights, native_color, native_create_actions, native_create_probe, native_internal_constraints, native_rsc_colocation_lh, native_rsc_colocation_rh, native_rsc_location, native_action_flags, native_update_actions, native_expand, native_append_meta, }, { group_merge_weights, group_color, group_create_actions, native_create_probe, group_internal_constraints, group_rsc_colocation_lh, group_rsc_colocation_rh, group_rsc_location, group_action_flags, group_update_actions, group_expand, group_append_meta, }, { clone_merge_weights, clone_color, clone_create_actions, clone_create_probe, clone_internal_constraints, clone_rsc_colocation_lh, clone_rsc_colocation_rh, clone_rsc_location, clone_action_flags, container_update_actions, clone_expand, clone_append_meta, }, { container_merge_weights, container_color, container_create_actions, container_create_probe, container_internal_constraints, container_rsc_colocation_lh, container_rsc_colocation_rh, container_rsc_location, container_action_flags, container_update_actions, container_expand, container_append_meta, } }; gboolean update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line) { static unsigned long calls = 0; gboolean changed = FALSE; gboolean clear = is_set(flags, pe_action_clear); enum pe_action_flags last = action->flags; if (clear) { action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags); } else { action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags); } if (last != action->flags) { calls++; changed = TRUE; /* Useful for tracking down _who_ changed a specific flag */ /* CRM_ASSERT(calls != 534); */ clear_bit(flags, pe_action_clear); crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)", action->uuid, action->node ? action->node->details->uname : "[none]", clear ? 
"un-" : "", flags, last, action->flags, calls, source); } return changed; } static gboolean check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry, gboolean active_here, pe_working_set_t * data_set) { int attr_lpc = 0; gboolean force_restart = FALSE; gboolean delete_resource = FALSE; gboolean changed = FALSE; const char *value = NULL; const char *old_value = NULL; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; for (; attr_lpc < DIMOF(attr_list); attr_lpc++) { value = crm_element_value(rsc->xml, attr_list[attr_lpc]); old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]); if (value == old_value /* i.e. NULL */ || crm_str_eq(value, old_value, TRUE)) { continue; } changed = TRUE; trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set); if (active_here) { force_restart = TRUE; crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s", rsc->id, node->details->uname, attr_list[attr_lpc], crm_str(old_value), crm_str(value)); } } if (force_restart) { /* make sure the restart happens */ stop_action(rsc, node, FALSE); set_bit(rsc->flags, pe_rsc_start_pending); delete_resource = TRUE; } else if (changed) { delete_resource = TRUE; } return delete_resource; } static void CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node, const char *reason, pe_working_set_t * data_set) { guint interval_ms = 0; action_t *cancel = NULL; const char *task = NULL; const char *call_id = NULL; const char *interval_ms_s = NULL; CRM_CHECK(xml_op != NULL, return); CRM_CHECK(active_node != NULL, return); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); crm_info("Action " CRM_OP_FMT " on %s will be stopped: %s", rsc->id, task, interval_ms, active_node->details->uname, (reason? 
reason : "unknown")); cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set); add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id); custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set); } static gboolean check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op, pe_working_set_t * data_set) { char *key = NULL; guint interval_ms = 0; const char *interval_ms_s = NULL; const op_digest_cache_t *digest_data = NULL; gboolean did_change = FALSE; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_secure = NULL; CRM_CHECK(active_node != NULL, return FALSE); interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); if (interval_ms > 0) { xmlNode *op_match = NULL; /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = generate_op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Checking parameters for %s", key); op_match = find_rsc_op_entry(rsc, key); if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) { CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set); free(key); return TRUE; } else if (op_match == NULL) { pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname); free(key); return TRUE; } free(key); key = NULL; } crm_trace("Testing " CRM_OP_FMT " on %s", rsc->id, task, interval_ms, active_node->details->uname); if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) { /* Reload based on the start action not a probe */ task = RSC_START; } else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) { /* Reload based on the start action not a migrate */ task = RSC_START; } else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) { /* Reload based on the start action not a promote */ task = RSC_START; } digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set); if(is_set(data_set->flags, pe_flag_sanitized)) { digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST); } if(digest_data->rc != RSC_DIGEST_MATCH && digest_secure && digest_data->digest_secure_calc && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) { if (is_set(data_set->flags, pe_flag_stdout)) { printf("Only 'private' parameters to " CRM_OP_FMT " on %s changed: %s\n", rsc->id, task, interval_ms, active_node->details->uname, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); } } else if (digest_data->rc == RSC_DIGEST_RESTART) { /* Changes that force a restart */ pe_action_t *required = NULL; did_change = TRUE; key = generate_op_key(rsc->id, task, interval_ms); crm_log_xml_info(digest_data->params_restart, "params:restart"); required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set); pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL, "resource definition change", pe_action_optional, TRUE); trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set); } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) { /* Changes that can potentially be handled by a reload */ const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); did_change = TRUE; trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set); crm_log_xml_info(digest_data->params_all, "params:reload"); key = generate_op_key(rsc->id, task, interval_ms); if (interval_ms > 0) { action_t *op = NULL; #if 0 /* Always reload/restart 
the entire resource */ ReloadRsc(rsc, active_node, data_set); #else /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */ op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_reschedule); #endif } else if (digest_restart) { pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id); /* Reload this resource */ ReloadRsc(rsc, active_node, data_set); free(key); } else { pe_action_t *required = NULL; pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id); /* Re-send the start/demote/promote op * Recurring ops will be detected independently */ required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set); pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL, "resource definition change", pe_action_optional, TRUE); } } return did_change; } /*! * \internal * \brief Do deferred action checks after allocation * * \param[in] data_set Working set for cluster */ static void check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op, enum pe_check_parameters check, pe_working_set_t *data_set) { const char *reason = NULL; op_digest_cache_t *digest_data = NULL; switch (check) { case pe_check_active: if (check_action_definition(rsc, node, rsc_op, data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { reason = "action definition changed"; } break; case pe_check_last_failure: digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s has no digest to compare", rsc->id, ID(rsc_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: reason = "resource parameters have changed"; break; } break; } if (reason) { pe__clear_failcount(rsc, node, reason, data_set); } } static void check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set) { GListPtr gIter = NULL; int offset = -1; guint interval_ms = 0; int stop_index = 0; int start_index = 0; const char *task = NULL; const char *interval_ms_s = NULL; xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; CRM_CHECK(node != NULL, return); if (is_set(rsc->flags, pe_rsc_orphan)) { resource_t *parent = uber_parent(rsc); if(parent == NULL || pe_rsc_is_clone(parent) == FALSE || is_set(parent->flags, pe_rsc_unique)) { pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id); DeleteRsc(rsc, node, FALSE, data_set); } else { pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id); } return; } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) { if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s", rsc->id, node->details->uname); return; } pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname); if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) 
gIter->data; offset++; if (start_index < stop_index) { /* stopped */ continue; } else if (offset < start_index) { /* action occurred prior to a start */ continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS); interval_ms = crm_parse_ms(interval_ms_s); if ((interval_ms > 0) && (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { // Maintenance mode cancels recurring operations CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); } else if ((interval_ms > 0) || safe_str_eq(task, RSC_STATUS) || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || safe_str_eq(task, RSC_MIGRATED)) { /* If a resource operation failed, and the operation's definition * has changed, clear any fail count so they can be retried fresh. */ if (container_fix_remote_addr(rsc)) { /* We haven't allocated resources to nodes yet, so if the * REMOTE_CONTAINER_HACK is used, we may calculate the digest * based on the literal "#uname" value rather than the properly * substituted value. That would mistakenly make the action * definition appear to have been changed. Defer the check until * later in this case. */ pe__add_param_check(rsc_op, rsc, node, pe_check_active, data_set); } else if (check_action_definition(rsc, node, rsc_op, data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { pe__clear_failcount(rsc, node, "action definition changed", data_set); } } } g_list_free(sorted_op_list); } static GListPtr find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones, gboolean partial, pe_working_set_t * data_set) { GListPtr gIter = NULL; gboolean match = FALSE; if (id == NULL) { return NULL; } else if (rsc == NULL && data_set) { for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } return result; } else if (rsc == NULL) { return NULL; } if (partial) { if (strstr(rsc->id, id)) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) { match = TRUE; } } else { if (strcmp(rsc->id, id) == 0) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } } if (match) { result = g_list_prepend(result, rsc); } if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } } return result; } static void check_actions(pe_working_set_t * data_set) { const char *id = NULL; node_t *node = NULL; xmlNode *lrm_rscs = NULL; xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); xmlNode *node_state = NULL; for (node_state = __xml_first_child(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { id = crm_element_value(node_state, XML_ATTR_ID); lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE); node = pe_find_node_id(data_set->nodes, id); if (node == NULL) { continue; /* Still need to check actions for a maintenance node to cancel existing monitor operations */ } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) { crm_trace("Skipping param check for %s: can't run resources", 
node->details->uname); continue; } crm_trace("Processing node %s", node->details->uname); if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { xmlNode *rsc_entry = NULL; for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if (xml_has_children(rsc_entry)) { GListPtr gIter = NULL; GListPtr result = NULL; const char *rsc_id = ID(rsc_entry); CRM_CHECK(rsc_id != NULL, return); result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set); for (gIter = result; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->variant != pe_native) { continue; } check_actions_for(rsc_entry, rsc, node, data_set); } g_list_free(result); } } } } } } } static gboolean apply_placement_constraints(pe_working_set_t * data_set) { GListPtr gIter = NULL; crm_trace("Applying constraints..."); for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { pe__location_t *cons = gIter->data; cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons); } return TRUE; } static gboolean failcount_clear_action_exists(node_t * node, resource_t * rsc) { gboolean rc = FALSE; char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); GListPtr list = find_actions_exact(rsc->actions, key, node); if (list) { rc = TRUE; } g_list_free(list); free(key); return rc; } /*! * \internal * \brief Force resource away if failures hit migration threshold * * \param[in,out] rsc Resource to check for failures * \param[in,out] node Node to check for failures * \param[in,out] data_set Cluster working set to update */ static void check_migration_threshold(resource_t *rsc, node_t *node, pe_working_set_t *data_set) { int fail_count, countdown; resource_t *failed; /* Migration threshold of 0 means never force away */ if (rsc->migration_threshold == 0) { return; } // If we're ignoring failures, also ignore the migration threshold if (is_set(rsc->flags, pe_rsc_failure_ignored)) { return; } /* If there are no failures, there's no need to force away */ fail_count = pe_get_failcount(node, rsc, NULL, pe_fc_effective|pe_fc_fillers, NULL, data_set); if (fail_count <= 0) { return; } /* How many more times recovery will be tried on this node */ countdown = QB_MAX(rsc->migration_threshold - fail_count, 0); /* If failed resource has a parent, we'll force the parent away */ failed = rsc; if (is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(rsc); } if (countdown == 0) { resource_location(failed, node, -INFINITY, "__fail_limit__", data_set); crm_warn("Forcing %s away from %s after %d failures (max=%d)", failed->id, node->details->uname, fail_count, rsc->migration_threshold); } else { crm_info("%s can fail %d more times on %s before being forced off", failed->id, countdown, node->details->uname); } } static void common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; common_apply_stickiness(child_rsc, node, data_set); } return; } if (is_set(rsc->flags, pe_rsc_managed) && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) { node_t *current = pe_find_node_id(rsc->running_on, node->details->id); node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (current == NULL) { } else if (match != NULL || is_set(data_set->flags, 
pe_flag_symmetric_cluster)) { resource_t *sticky_rsc = rsc; resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set); pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location" " (node=%s, weight=%d)", sticky_rsc->id, node->details->uname, rsc->stickiness); } else { GHashTableIter iter; node_t *nIter = NULL; pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric" " and node %s is not explicitly allowed", rsc->id, node->details->uname); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) { crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight); } } } /* Check the migration threshold only if a failcount clear action * has not already been placed for this resource on the node. * There is no sense in potentially forcing the resource from this * node if the failcount is being reset anyway. * * @TODO A clear_failcount operation can be scheduled in stage4() via * check_actions_for(), or in stage5() via check_params(). This runs in * stage2(), so it cannot detect those, meaning we might check the migration * threshold when we shouldn't -- worst case, we stop or move the resource, * then move it back next transition. */ if (failcount_clear_action_exists(node, rsc) == FALSE) { check_migration_threshold(rsc, node, data_set); } } void complex_set_cmds(resource_t * rsc) { GListPtr gIter = rsc->children; rsc->cmds = &resource_class_alloc_functions[rsc->variant]; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; complex_set_cmds(child_rsc); } } void set_alloc_actions(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; complex_set_cmds(rsc); } } static void calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data) { const char *key = (const char *)gKey; const char *value = (const char *)gValue; int *system_health = (int *)user_data; if (!gKey || !gValue || !user_data) { return; } if (crm_starts_with(key, "#health")) { int score; /* Convert the value into an integer */ score = char2score(value); /* Add it to the running total */ *system_health = merge_weights(score, *system_health); } } static gboolean apply_system_health(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy"); int base_health = 0; if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) { /* Prevent any accidental health -> score translation */ node_score_red = 0; node_score_yellow = 0; node_score_green = 0; return TRUE; } else if (safe_str_eq(health_strategy, "migrate-on-red")) { /* Resources on nodes which have health values of red are * weighted away from that node. */ node_score_red = -INFINITY; node_score_yellow = 0; node_score_green = 0; } else if (safe_str_eq(health_strategy, "only-green")) { /* Resources on nodes which have health values of red or yellow * are forced away from that node. 
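*
* A rough sketch of the effect (attribute names are only illustrative;
* the mapping assumes the usual char2score() handling of
* "red"/"yellow"/"green" values):
*
*   #health-smart = "yellow"  ->  char2score() = node_score_yellow = -INFINITY
*   #health-temp  = "green"   ->  char2score() = node_score_green  = 0
*
* so a single red or yellow #health attribute drives the node's combined
* health to -INFINITY, and rsc2node_new() below then records that score
* against the node for every resource.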
*/ node_score_red = -INFINITY; node_score_yellow = -INFINITY; node_score_green = 0; } else if (safe_str_eq(health_strategy, "progressive")) { /* Same as the above, but use the r/y/g scores provided by the user * Defaults are provided by the pe_prefs table * Also, custom health "base score" can be used */ base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0"); } else if (safe_str_eq(health_strategy, "custom")) { /* Requires the admin to configure the rsc_location constaints for * processing the stored health scores */ /* TODO: Check for the existence of appropriate node health constraints */ return TRUE; } else { crm_err("Unknown node health strategy: %s", health_strategy); return FALSE; } crm_info("Applying automated node health strategy: %s", health_strategy); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { int system_health = base_health; node_t *node = (node_t *) gIter->data; /* Search through the node hash table for system health entries. */ g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health); crm_info(" Node %s has an combined system health of %d", node->details->uname, system_health); /* If the health is non-zero, then create a new rsc2node so that the * weight will be added later on. */ if (system_health != 0) { GListPtr gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set); } } } return TRUE; } gboolean stage0(pe_working_set_t * data_set) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); if (data_set->input == NULL) { return FALSE; } if (is_set(data_set->flags, pe_flag_have_status) == FALSE) { crm_trace("Calculating status"); cluster_status(data_set); } set_alloc_actions(data_set); apply_system_health(data_set); unpack_constraints(cib_constraints, data_set); return TRUE; } /* * Check nodes for resources started outside of the LRM */ gboolean probe_resources(pe_working_set_t * data_set) { action_t *probe_node_complete = NULL; for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED); if (node->details->online == FALSE) { if (is_baremetal_remote_node(node) && node->details->remote_rsc && (get_remote_node_state(node) == remote_state_failed)) { pe_fence_node(data_set, node, "the connection is unrecoverable"); } continue; } else if (node->details->unclean) { continue; } else if (node->details->rsc_discovery_enabled == FALSE) { /* resource discovery is disabled for this node */ continue; } if (probed != NULL && crm_is_true(probed) == FALSE) { action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname), CRM_OP_REPROBE, node, FALSE, TRUE, data_set); add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); continue; } for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set); } } return TRUE; } static void rsc_discover_filter(resource_t *rsc, node_t *node) { GListPtr gIter = rsc->children; resource_t *top = uber_parent(rsc); node_t *match; if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) { return; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) 
gIter->data; rsc_discover_filter(child_rsc, node); } match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match && match->rsc_discover_mode != pe_discover_exclusive) { match->weight = -INFINITY; } } /* * Count how many valid nodes we have (so we know the maximum number of * colors we can resolve). * * Apply node constraints (i.e. filter the "allowed_nodes" part of resources) */ gboolean stage2(pe_working_set_t * data_set) { GListPtr gIter = NULL; crm_trace("Applying placement constraints"); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node == NULL) { /* error */ } else if (node->weight >= 0.0 /* global weight */ && node->details->online && node->details->type != node_ping) { data_set->max_valid_nodes++; } } apply_placement_constraints(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; node_t *node = (node_t *) gIter->data; gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; common_apply_stickiness(rsc, node, data_set); rsc_discover_filter(rsc, node); } } return TRUE; } /* * Create internal resource constraints before allocation */ gboolean stage3(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; rsc->cmds->internal_constraints(rsc, data_set); } return TRUE; } /* * Check for orphaned or redefined actions */ gboolean stage4(pe_working_set_t * data_set) { check_actions(data_set); return TRUE; } static void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } static gint sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data) { int rc = 0; int r1_weight = -INFINITY; int r2_weight = -INFINITY; const char *reason = "existence"; const GListPtr nodes = (GListPtr) data; const resource_t *resource1 = a; const resource_t *resource2 = b; node_t *r1_node = NULL; node_t *r2_node = NULL; GListPtr gIter = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; if (a == NULL && b == NULL) { goto done; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } reason = "priority"; r1_weight = resource1->priority; r2_weight = resource2->priority; if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "no node list"; if (nodes == NULL) { goto done; } r1_nodes = rsc_merge_weights(convert_const_pointer(resource1), resource1->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes); r2_nodes = rsc_merge_weights(convert_const_pointer(resource2), resource2->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes); /* Current location score */ reason = "current location"; r1_weight = -INFINITY; r2_weight = -INFINITY; if (resource1->running_on) { r1_node = pe__current_node(resource1); r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id); if (r1_node != NULL) { r1_weight = r1_node->weight; } } if (resource2->running_on) { r2_node = pe__current_node(resource2); r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id); if (r2_node != NULL) { r2_weight = r2_node->weight; } } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "score"; for (gIter = nodes; gIter != NULL; gIter = gIter->next) { 
node_t *node = (node_t *) gIter->data; r1_node = NULL; r2_node = NULL; r1_weight = -INFINITY; if (r1_nodes) { r1_node = g_hash_table_lookup(r1_nodes, node->details->id); } if (r1_node) { r1_weight = r1_node->weight; } r2_weight = -INFINITY; if (r2_nodes) { r2_node = g_hash_table_lookup(r2_nodes, node->details->id); } if (r2_node) { r2_weight = r2_node->weight; } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } } done: crm_trace("%s (%d) on %s %c %s (%d) on %s: %s", resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a", rc < 0 ? '>' : rc > 0 ? '<' : '=', resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason); if (r1_nodes) { g_hash_table_destroy(r1_nodes); } if (r2_nodes) { g_hash_table_destroy(r2_nodes); } return rc; } static void allocate_resources(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_have_remote_nodes)) { /* Force remote connection resources to be allocated first. This * also forces any colocation dependencies to be allocated as well */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->is_remote_node == FALSE) { continue; } pe_rsc_trace(rsc, "Allocating: %s", rsc->id); /* For remote node connection resources, always prefer the partial * migration target during resource allocation, if the rsc is in the * middle of a migration. */ rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set); } } /* now do the rest of the resources */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (rsc->is_remote_node == TRUE) { continue; } pe_rsc_trace(rsc, "Allocating: %s", rsc->id); rsc->cmds->allocate(rsc, NULL, data_set); } } /* We always use pe_order_preserve with these convenience functions to exempt * internally generated constraints from the prohibition of user constraints * involving remote connection resources. * * The start ordering additionally uses pe_order_runnable_left so that the * specified action is not runnable if the start is not runnable. 
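*
* A minimal usage sketch (these are the same calls made later in this file
* for Pacemaker Remote ordering, not a new API):
*
*   // the connection must start before the action may run
*   order_start_then_action(remote_rsc, action, pe_order_none, data_set);
*
*   // the action must finish before the connection may be stopped
*   order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set);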
*/ static inline void order_start_then_action(resource_t *lh_rsc, action_t *rh_action, enum pe_ordering extra, pe_working_set_t *data_set) { if (lh_rsc && rh_action && data_set) { custom_action_order(lh_rsc, start_key(lh_rsc), NULL, rh_action->rsc, NULL, rh_action, pe_order_preserve | pe_order_runnable_left | extra, data_set); } } static inline void order_action_then_stop(action_t *lh_action, resource_t *rh_rsc, enum pe_ordering extra, pe_working_set_t *data_set) { if (lh_action && rh_rsc && data_set) { custom_action_order(lh_action->rsc, NULL, lh_action, rh_rsc, stop_key(rh_rsc), NULL, pe_order_preserve | extra, data_set); } } static void cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { return; } /* Don't recurse into ->children, those are just unallocated clone instances */ if(is_not_set(rsc->flags, pe_rsc_orphan)) { return; } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node->details->online && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { pe_action_t *clear_op = NULL; clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set); /* We can't use order_action_then_stop() here because its * pe_order_preserve breaks things */ custom_action_order(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc), NULL, pe_order_optional, data_set); } } } gboolean stage5(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr nodes = g_list_copy(data_set->nodes); nodes = sort_nodes_by_weight(nodes, NULL, data_set); data_set->resources = g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes); g_list_free(nodes); } gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node); } crm_trace("Allocating services"); /* Take (next) highest resource, assign it and create its actions */ allocate_resources(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; dump_node_capacity(show_utilization ? 
0 : utilization_log_level, "Remaining", node); } // Process deferred action checks pe__foreach_param_check(data_set, check_params); pe__free_param_checks(data_set); if (is_set(data_set->flags, pe_flag_startup_probes)) { crm_trace("Calculating needed probes"); /* This code probably needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=100: With probes: ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints 36s ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph Without probes: ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph */ probe_resources(data_set); } crm_trace("Handle orphans"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; cleanup_orphans(rsc, data_set); } crm_trace("Creating actions"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; rsc->cmds->create_actions(rsc, data_set); } crm_trace("Creating done"); return TRUE; } static gboolean is_managed(const resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_managed)) { return TRUE; } for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; if (is_managed(child_rsc)) { return TRUE; } } return FALSE; } static gboolean any_managed_resources(pe_working_set_t * data_set) { GListPtr gIter 
= data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; if (is_managed(rsc)) { return TRUE; } } return FALSE; } /*! * \internal * \brief Create pseudo-op for guest node fence, and order relative to it * * \param[in] node Guest node to fence * \param[in] data_set Working set of CIB state */ static void fence_guest(pe_node_t *node, pe_working_set_t *data_set) { resource_t *container = node->details->remote_rsc->container; pe_action_t *stop = NULL; pe_action_t *stonith_op = NULL; /* The fence action is just a label; we don't do anything differently for * off vs. reboot. We specify it explicitly, rather than let it default to * cluster's default action, because we are not _initiating_ fencing -- we * are creating a pseudo-event to describe fencing that is already occurring * by other means (container recovery). */ const char *fence_action = "off"; /* Check whether guest's container resource has any explicit stop or * start (the stop may be implied by fencing of the guest's host). */ if (container) { stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL); if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) { fence_action = "reboot"; } } /* Create a fence pseudo-event, so we have an event to order actions * against, and the controller can always detect it. */ stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set); update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable, __FUNCTION__, __LINE__); /* We want to imply stops/demotes after the guest is stopped, not wait until * it is restarted, so we always order pseudo-fencing after stop, not start * (even though start might be closer to what is done for a real reboot). */ if(stop && is_set(stop->flags, pe_action_pseudo)) { pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, data_set); crm_info("Implying guest node %s is down (action %d) after %s fencing", node->details->uname, stonith_op->id, stop->node->details->uname); order_actions(parent_stonith_op, stonith_op, pe_order_runnable_left|pe_order_implies_then); } else if (stop) { order_actions(stop, stonith_op, pe_order_runnable_left|pe_order_implies_then); crm_info("Implying guest node %s is down (action %d) " "after container %s is stopped (action %d)", node->details->uname, stonith_op->id, container->id, stop->id); } else { /* If we're fencing the guest node but there's no stop for the guest * resource, we must think the guest is already stopped. However, we may * think so because its resource history was just cleaned. To avoid * unnecessarily considering the guest node down if it's really up, * order the pseudo-fencing after any stop of the connection resource, * which will be ordered after any container (re-)probe. */ stop = find_first_action(node->details->remote_rsc->actions, NULL, RSC_STOP, NULL); if (stop) { order_actions(stop, stonith_op, pe_order_optional); crm_info("Implying guest node %s is down (action %d) " "after connection is stopped (action %d)", node->details->uname, stonith_op->id, stop->id); } else { /* Not sure why we're fencing, but everything must already be * cleanly stopped. 
*/ crm_info("Implying guest node %s is down (action %d) ", node->details->uname, stonith_op->id); } } /* Order/imply other actions relative to pseudo-fence as with real fence */ stonith_constraints(node, stonith_op, data_set); } /* * Create dependencies for stonith and shutdown operations */ gboolean stage6(pe_working_set_t * data_set) { action_t *dc_down = NULL; action_t *stonith_op = NULL; - action_t *last_stonith = NULL; gboolean integrity_lost = FALSE; gboolean need_stonith = TRUE; GListPtr gIter; GListPtr stonith_ops = NULL; + GList *shutdown_ops = NULL; /* Remote ordering constraints need to happen prior to calculate * fencing because it is one more place we will mark the node as * dirty. * * A nice side-effect of doing it first is that we can remove a * bunch of special logic from apply_*_ordering() because its * already part of pe_fence_node() */ crm_trace("Creating remote ordering constraints"); apply_remote_node_ordering(data_set); crm_trace("Processing fencing and shutdown cases"); if (any_managed_resources(data_set) == FALSE) { crm_notice("Delaying fencing operations until there are resources to manage"); need_stonith = FALSE; } /* Check each node for stonith/shutdown */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* Guest nodes are "fenced" by recovering their container resource, * so handle them separately. */ if (is_container_remote_node(node)) { if (node->details->remote_requires_reset && need_stonith) { fence_guest(node, data_set); } continue; } stonith_op = NULL; if (node->details->unclean && need_stonith && pe_can_fence(data_set, node)) { stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set); pe_warn("Scheduling Node %s for STONITH", node->details->uname); stonith_constraints(node, stonith_op, data_set); if (node->details->is_dc) { + // Remember if the DC is being fenced dc_down = stonith_op; - } else if (is_set(data_set->flags, pe_flag_concurrent_fencing) == FALSE) { - if (last_stonith) { - order_actions(last_stonith, stonith_op, pe_order_optional); + } else { + + if (is_not_set(data_set->flags, pe_flag_concurrent_fencing) + && (stonith_ops != NULL)) { + /* Concurrent fencing is disabled, so order each non-DC + * fencing in a chain. If there is any DC fencing or + * shutdown, it will be ordered after the last action in the + * chain later. + */ + order_actions((pe_action_t *) stonith_ops->data, + stonith_op, pe_order_optional); } - last_stonith = stonith_op; - } else { - stonith_ops = g_list_append(stonith_ops, stonith_op); + // Remember all non-DC fencing actions in a separate list + stonith_ops = g_list_prepend(stonith_ops, stonith_op); } } else if (node->details->online && node->details->shutdown && /* TODO define what a shutdown op means for a remote node. * For now we do not send shutdown operations for remote nodes, but * if we can come up with a good use for this in the future, we will. 
*/ is_remote_node(node) == FALSE) { - action_t *down_op = NULL; - - crm_notice("Scheduling Node %s for shutdown", node->details->uname); - - down_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname), - CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set); - - shutdown_constraints(node, down_op, data_set); - add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); + action_t *down_op = sched_shutdown_op(node, data_set); if (node->details->is_dc) { + // Remember if the DC is being shut down dc_down = down_op; + } else { + // Remember non-DC shutdowns for later ordering + shutdown_ops = g_list_prepend(shutdown_ops, down_op); } } if (node->details->unclean && stonith_op == NULL) { integrity_lost = TRUE; pe_warn("Node %s is unclean!", node->details->uname); } } if (integrity_lost) { if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED"); pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE"); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) { crm_notice("Cannot fence unclean nodes until quorum is" " attained (or no-quorum-policy is set to ignore)"); } } if (dc_down != NULL) { - GListPtr gIter = NULL; - - crm_trace("Ordering shutdowns before %s on %s (DC)", - dc_down->task, dc_down->node->details->uname); - - add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); + /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated + * DC elections. However, we don't want to order non-DC shutdowns before + * a DC *fencing*, because even though we don't want a node that's + * shutting down to become DC, the DC fencing could be ordered before a + * clone stop that's also ordered before the shutdowns, thus leading to + * a graph loop. + */ + if (safe_str_eq(dc_down->task, CRM_OP_SHUTDOWN)) { + for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) { + action_t *node_stop = (action_t *) gIter->data; - for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { - action_t *node_stop = (action_t *) gIter->data; + crm_debug("Ordering shutdown on %s before %s on DC %s", + node_stop->node->details->uname, + dc_down->task, dc_down->node->details->uname); - if (safe_str_neq(CRM_OP_SHUTDOWN, node_stop->task)) { - continue; - } else if (node_stop->node->details->is_dc) { - continue; + order_actions(node_stop, dc_down, pe_order_optional); } - - crm_debug("Ordering shutdown on %s before %s on %s", - node_stop->node->details->uname, - dc_down->task, dc_down->node->details->uname); - - order_actions(node_stop, dc_down, pe_order_optional); } - if (last_stonith) { - if (dc_down != last_stonith) { - order_actions(last_stonith, dc_down, pe_order_optional); - } + // Order any non-DC fencing before any DC fencing or shutdown - } else { - GListPtr gIter2 = NULL; - - for (gIter2 = stonith_ops; gIter2 != NULL; gIter2 = gIter2->next) { - stonith_op = (action_t *) gIter2->data; - - if (dc_down != stonith_op) { - order_actions(stonith_op, dc_down, pe_order_optional); - } + if (is_set(data_set->flags, pe_flag_concurrent_fencing)) { + /* With concurrent fencing, order each non-DC fencing action + * separately before any DC fencing or shutdown. 
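+             * For example, with unclean non-DC nodes A, B and C and the DC
+             * also being fenced, this yields the independent orderings
+             *   fence(A) -> fence(DC), fence(B) -> fence(DC), fence(C) -> fence(DC)
+             * rather than the single chain fence(A) -> fence(B) -> fence(C)
+             * -> fence(DC) produced when concurrent fencing is disabled.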
+ */ + for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) { + order_actions((pe_action_t *) gIter->data, dc_down, + pe_order_optional); } + } else if (stonith_ops) { + /* Without concurrent fencing, the non-DC fencing actions are + * already ordered relative to each other, so we just need to order + * the DC fencing after the last action in the chain (which is the + * first item in the list). + */ + order_actions((pe_action_t *) stonith_ops->data, dc_down, + pe_order_optional); } } g_list_free(stonith_ops); + g_list_free(shutdown_ops); return TRUE; } /* * Determine the sets of independent actions and the correct order for the * actions in each set. * * Mark dependencies of un-runnable actions un-runnable * */ static GListPtr find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if (list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *task = NULL; guint interval_ms = 0; if (parse_op_key(original_key, NULL, &task, &interval_ms)) { key = generate_op_key(rsc->id, task, interval_ms); list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } free(key); free(task); } return list; } static void rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc, pe__ordering_t *order) { GListPtr gIter = NULL; GListPtr rh_actions = NULL; action_t *rh_action = NULL; enum pe_ordering type; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); type = order->type; rh_action = order->rh_action; crm_trace("Processing RH of ordering constraint %d", order->id); if (rh_action != NULL) { rh_actions = g_list_prepend(NULL, rh_action); } else if (rsc != NULL) { rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task); } if (rh_actions == NULL) { pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..." 
" ignoring", rsc->id, order->rh_action_task); if (lh_action) { pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid); } return; } if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) { pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid, order->rh_action_task); clear_bit(type, pe_order_implies_then); } gIter = rh_actions; for (; gIter != NULL; gIter = gIter->next) { action_t *rh_action_iter = (action_t *) gIter->data; if (lh_action) { order_actions(lh_action, rh_action_iter, type); } else if (type & pe_order_implies_then) { update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type); } } g_list_free(rh_actions); } static void rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order, pe_working_set_t *data_set) { GListPtr gIter = NULL; GListPtr lh_actions = NULL; action_t *lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_trace("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if (lh_action != NULL) { lh_actions = g_list_prepend(NULL, lh_action); } else { lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task); } if (lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *op_type = NULL; guint interval_ms = 0; parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms); key = generate_op_key(lh_rsc->id, op_type, interval_ms); if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else { pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); lh_actions = g_list_prepend(NULL, lh_action); } free(op_type); } gIter = lh_actions; for (; gIter != NULL; gIter = gIter->next) { action_t *lh_action_iter = (action_t *) gIter->data; if (rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if (rh_rsc) { rsc_order_then(lh_action_iter, rh_rsc, order); } else if (order->rh_action) { order_actions(lh_action_iter, order->rh_action, order->type); } } g_list_free(lh_actions); } extern void update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set); static int is_recurring_action(action_t *action) { const char *interval_ms_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); guint interval_ms = crm_parse_ms(interval_ms_s); return (interval_ms > 0); } static void apply_container_ordering(action_t *action, pe_working_set_t *data_set) { /* VMs are also classified as containers for these purposes... in * that they both involve a 'thing' running on a real or remote * cluster node. 
* * This allows us to be smarter about the type and extent of * recovery actions required in various scenarios */ resource_t *remote_rsc = NULL; resource_t *container = NULL; enum action_tasks task = text2task(action->task); CRM_ASSERT(action->rsc); CRM_ASSERT(action->node); CRM_ASSERT(is_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); container = remote_rsc->container; CRM_ASSERT(container); if(is_set(container->flags, pe_rsc_failed)) { pe_fence_node(data_set, action->node, "container failed"); } crm_trace("Order %s action %s relative to %s%s for %s%s", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", remote_rsc->id, is_set(container->flags, pe_rsc_failed)? "failed " : "", container->id); if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { /* Migration ops map to "no_action", but we need to apply the same * ordering as for stop or demote (see get_router_node()). */ task = stop_rsc; } switch (task) { case start_rsc: case action_promote: /* Force resource recovery if the container is recovered */ order_start_then_action(container, action, pe_order_implies_then, data_set); /* Wait for the connection resource to be up too */ order_start_then_action(remote_rsc, action, pe_order_none, data_set); break; case stop_rsc: case action_demote: if (is_set(container->flags, pe_rsc_failed)) { /* When the container representing a guest node fails, any stop * or demote actions for resources running on the guest node * are implied by the container stopping. This is similar to * how fencing operations work for cluster nodes and remote * nodes. */ } else { /* Ensure the operation happens before the connection is brought * down. * * If we really wanted to, we could order these after the * connection start, IFF the container's current role was * stopped (otherwise we re-introduce an ordering loop when the * connection is restarting). */ order_action_then_stop(action, remote_rsc, pe_order_none, data_set); } break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ if(task != no_action) { order_start_then_action(remote_rsc, action, pe_order_implies_then, data_set); } } else { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } break; } } static enum remote_connection_state get_remote_node_state(pe_node_t *node) { resource_t *remote_rsc = NULL; node_t *cluster_node = NULL; CRM_ASSERT(node); remote_rsc = node->details->remote_rsc; CRM_ASSERT(remote_rsc); cluster_node = pe__current_node(remote_rsc); /* If the cluster node the remote connection resource resides on * is unclean or went offline, we can't process any operations * on that remote node until after it starts elsewhere. */ if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) { /* The connection resource is not going to run anywhere */ if (cluster_node && cluster_node->details->unclean) { /* The remote connection is failed because its resource is on a * failed node and can't be recovered elsewhere, so we must fence. 
*/ return remote_state_failed; } if (is_not_set(remote_rsc->flags, pe_rsc_failed)) { /* Connection resource is cleanly stopped */ return remote_state_stopped; } /* Connection resource is failed */ if ((remote_rsc->next_role == RSC_ROLE_STOPPED) && remote_rsc->remote_reconnect_ms && node->details->remote_was_fenced) { /* We won't know whether the connection is recoverable until the * reconnect interval expires and we reattempt connection. */ return remote_state_unknown; } /* The remote connection is in a failed state. If there are any * resources known to be active on it (stop) or in an unknown state * (probe), we must assume the worst and fence it. */ return remote_state_failed; } else if (cluster_node == NULL) { /* Connection is recoverable but not currently running anywhere, see if we can recover it first */ return remote_state_unknown; } else if(cluster_node->details->unclean == TRUE || cluster_node->details->online == FALSE) { /* Connection is running on a dead node, see if we can recover it first */ return remote_state_resting; } else if (g_list_length(remote_rsc->running_on) > 1 && remote_rsc->partial_migration_source && remote_rsc->partial_migration_target) { /* We're in the middle of migrating a connection resource, * wait until after the resource migrates before performing * any actions. */ return remote_state_resting; } return remote_state_alive; } /*! * \internal * \brief Order actions on remote node relative to actions for the connection */ static void apply_remote_ordering(action_t *action, pe_working_set_t *data_set) { resource_t *remote_rsc = NULL; enum action_tasks task = text2task(action->task); enum remote_connection_state state = get_remote_node_state(action->node); enum pe_ordering order_opts = pe_order_none; if (action->rsc == NULL) { return; } CRM_ASSERT(action->node); CRM_ASSERT(is_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); crm_trace("Order %s action %s relative to %s%s (state: %s)", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", remote_rsc->id, state2text(state)); if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { /* Migration ops map to "no_action", but we need to apply the same * ordering as for stop or demote (see get_router_node()). */ task = stop_rsc; } switch (task) { case start_rsc: case action_promote: order_opts = pe_order_none; if (state == remote_state_failed) { /* Force recovery, by making this action required */ order_opts |= pe_order_implies_then; } /* Ensure connection is up before running this action */ order_start_then_action(remote_rsc, action, order_opts, data_set); break; case stop_rsc: if(state == remote_state_alive) { order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else if(state == remote_state_failed) { /* We would only be here if the resource is * running on the remote node. Since we have no * way to stop it, it is necessary to fence the * node. */ pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable"); order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) { /* State must be remote_state_unknown or remote_state_stopped. * Since the connection is not coming back up in this * transition, stop this resource first. 
*/ order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else { /* The connection is going to be started somewhere else, so * stop this resource after that completes. */ order_start_then_action(remote_rsc, action, pe_order_none, data_set); } break; case action_demote: /* Only order this demote relative to the connection start if the * connection isn't being torn down. Otherwise, the demote would be * blocked because the connection start would not be allowed. */ if(state == remote_state_resting || state == remote_state_unknown) { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } /* Otherwise we can rely on the stop ordering */ break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ order_start_then_action(remote_rsc, action, pe_order_implies_then, data_set); } else { node_t *cluster_node = pe__current_node(remote_rsc); if(task == monitor_rsc && state == remote_state_failed) { /* We would only be here if we do not know the * state of the resource on the remote node. * Since we have no way to find out, it is * necessary to fence the node. */ pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable"); } if(cluster_node && state == remote_state_stopped) { /* The connection is currently up, but is going * down permanently. * * Make sure we check services are actually * stopped _before_ we let the connection get * closed */ order_action_then_stop(action, remote_rsc, pe_order_runnable_left, data_set); } else { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } } break; } } static void apply_remote_node_ordering(pe_working_set_t *data_set) { if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) { return; } for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; resource_t *remote = NULL; // We are only interested in resource actions if (action->rsc == NULL) { continue; } /* Special case: If we are clearing the failcount of an actual * remote connection resource, then make sure this happens before * any start of the resource in this transition. */ if (action->rsc->is_remote_node && safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { custom_action_order(action->rsc, NULL, action, action->rsc, generate_op_key(action->rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); continue; } // We are only interested in actions allocated to a node if (action->node == NULL) { continue; } if (is_remote_node(action->node) == FALSE) { continue; } /* We are only interested in real actions. * * @TODO This is probably wrong; pseudo-actions might be converted to * real actions and vice versa later in update_actions() at the end of * stage7(). */ if (is_set(action->flags, pe_action_pseudo)) { continue; } remote = action->node->details->remote_rsc; if (remote == NULL) { // Orphaned continue; } /* Another special case: if a resource is moving to a Pacemaker Remote * node, order the stop on the original node after any start of the * remote connection. This ensures that if the connection fails to * start, we leave the resource running on the original node. 
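*
* In ordering terms, this is what the custom_action_order() call below
* expresses:
*
*   start <remote connection>  then  stop <rsc> (on the original node)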
*/ if (safe_str_eq(action->task, RSC_START)) { for (GList *item = action->rsc->actions; item != NULL; item = item->next) { pe_action_t *rsc_action = item->data; if ((rsc_action->node->details != action->node->details) && safe_str_eq(rsc_action->task, RSC_STOP)) { custom_action_order(remote, start_key(remote), NULL, action->rsc, NULL, rsc_action, pe_order_optional, data_set); } } } /* The action occurs across a remote connection, so create * ordering constraints that guarantee the action occurs while the node * is active (after start, before stop ... things like that). * * This is somewhat brittle in that we need to make sure the results of * this ordering are compatible with the result of get_router_node(). * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part * of this logic rather than action2xml(). */ if (remote->container) { crm_trace("Container ordering for %s", action->uuid); apply_container_ordering(action, data_set); } else { crm_trace("Remote ordering for %s", action->uuid); apply_remote_ordering(action, data_set); } } } static void order_probes(pe_working_set_t * data_set) { #if 0 GListPtr gIter = NULL; for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; /* Given "A then B", we would prefer to wait for A to be * started before probing B. * * If A was a filesystem on which the binaries and data for B * lived, it would have been useful if the author of B's agent * could assume that A is running before B.monitor will be * called. * * However we can't _only_ probe once A is running, otherwise * we'd not detect the state of B if A could not be started * for some reason. * * In practice however, we cannot even do an opportunistic * version of this because B may be moving: * * B.probe -> B.start * B.probe -> B.stop * B.stop -> B.start * A.stop -> A.start * A.start -> B.probe * * So far so good, but if we add the result of this code: * * B.stop -> A.stop * * Then we get a loop: * * B.probe -> B.stop -> A.stop -> A.start -> B.probe * * We could kill the 'B.probe -> B.stop' dependency, but that * could mean stopping B "too" soon, because B.start must wait * for the probes to complete. * * Another option is to allow it only if A is a non-unique * clone with clone-max == node-max (since we'll never be * moving it). However, we could still be stopping one * instance at the same time as starting another. * The complexity of checking for allowed conditions combined * with the ever narrowing usecase suggests that this code * should remain disabled until someone gets smarter. 
*/ action_t *start = NULL; GListPtr actions = NULL; GListPtr probes = NULL; char *key = NULL; key = start_key(rsc); actions = find_actions(rsc->actions, key, NULL); free(key); if (actions) { start = actions->data; g_list_free(actions); } if(start == NULL) { crm_err("No start action for %s", rsc->id); continue; } key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0); probes = find_actions(rsc->actions, key, NULL); free(key); for (actions = start->actions_before; actions != NULL; actions = actions->next) { action_wrapper_t *before = (action_wrapper_t *) actions->data; GListPtr pIter = NULL; action_t *first = before->action; resource_t *first_rsc = first->rsc; if(first->required_runnable_before) { GListPtr clone_actions = NULL; for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) { before = (action_wrapper_t *) clone_actions->data; crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid); CRM_ASSERT(before->action->rsc); first_rsc = before->action->rsc; break; } } else if(safe_str_neq(first->task, RSC_START)) { crm_trace("Not a start op %s for %s", first->uuid, start->uuid); } if(first_rsc == NULL) { continue; } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) { crm_trace("Same parent %s for %s", first_rsc->id, start->uuid); continue; } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) { crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid); continue; } crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant); for (pIter = probes; pIter != NULL; pIter = pIter->next) { action_t *probe = (action_t *) pIter->data; crm_err("Ordering %s before %s", first->uuid, probe->uuid); order_actions(first, probe, pe_order_optional); } } } #endif } gboolean stage7(pe_working_set_t * data_set) { GListPtr gIter = NULL; crm_trace("Applying ordering constraints"); /* Don't ask me why, but apparently they need to be processed in * the order they were created in... 
go figure * * Also g_list_append() has horrendous performance characteristics * So we need to use g_list_prepend() and then reverse the list here */ data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints); for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) { pe__ordering_t *order = gIter->data; resource_t *rsc = order->lh_rsc; crm_trace("Applying ordering constraint: %d", order->id); if (rsc != NULL) { crm_trace("rsc_action-to-*"); rsc_order_first(rsc, order, data_set); continue; } rsc = order->rh_rsc; if (rsc != NULL) { crm_trace("action-to-rsc_action"); rsc_order_then(order->lh_action, rsc, order); } else { crm_trace("action-to-action"); order_actions(order->lh_action, order->rh_action, order->type); } } for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; update_colo_start_chain(action, data_set); } crm_trace("Ordering probes"); order_probes(data_set); crm_trace("Updating %d actions", g_list_length(data_set->actions)); for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; update_action(action, data_set); } LogNodeActions(data_set, FALSE); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; LogActions(rsc, data_set, FALSE); } return TRUE; } int transition_id = -1; /* * Create a dependency graph to send to the transitioner (via the controller) */ gboolean stage8(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *value = NULL; transition_id++; crm_trace("Creating transition graph %d.", transition_id); data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH); value = pe_pref(data_set->config_hash, "cluster-delay"); crm_xml_add(data_set->graph, "cluster-delay", value); value = pe_pref(data_set->config_hash, "stonith-timeout"); crm_xml_add(data_set->graph, "stonith-timeout", value); crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY"); if (is_set(data_set->flags, pe_flag_start_failure_fatal)) { crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY"); } else { crm_xml_add(data_set->graph, "failed-start-offset", "1"); } value = pe_pref(data_set->config_hash, "batch-limit"); crm_xml_add(data_set->graph, "batch-limit", value); crm_xml_add_int(data_set->graph, "transition_id", transition_id); value = pe_pref(data_set->config_hash, "migration-limit"); if (crm_int_helper(value, NULL) > 0) { crm_xml_add(data_set->graph, "migration-limit", value); } /* errors... 
slist_iter(action, action_t, action_list, lpc, if(action->optional == FALSE && action->runnable == FALSE) { print_action("Ignoring", action, TRUE); } ); */ gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id); rsc->cmds->expand(rsc, data_set); } crm_log_xml_trace(data_set->graph, "created resource-driven action list"); /* pseudo action to distribute list of nodes with maintenance state update */ add_maintenance_update(data_set); /* catch any non-resource specific actions */ crm_trace("processing non-resource actions"); gIter = data_set->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (action->rsc && action->node && action->node->details->shutdown && is_not_set(action->rsc->flags, pe_rsc_maintenance) && is_not_set(action->flags, pe_action_optional) && is_not_set(action->flags, pe_action_runnable) && crm_str_eq(action->task, RSC_STOP, TRUE) ) { /* Eventually we should just ignore the 'fence' case * But for now it's the best way to detect (in CTS) when * CIB resource updates are being lost */ if (is_set(data_set->flags, pe_flag_have_quorum) || data_set->no_quorum_policy == no_quorum_ignore) { crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)", action->node->details->unclean ? "fence" : "shut down", action->node->details->uname, action->rsc->id, is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked", is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "", action->uuid); } } graph_element_from_action(action, data_set); } crm_log_xml_trace(data_set->graph, "created generic action list"); crm_trace("Created transition graph %d.", transition_id); return TRUE; } void LogNodeActions(pe_working_set_t * data_set, gboolean terminal) { GListPtr gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { char *node_name = NULL; char *task = NULL; action_t *action = (action_t *) gIter->data; if (action->rsc != NULL) { continue; } else if (is_set(action->flags, pe_action_optional)) { continue; } if (is_container_remote_node(action->node)) { node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id); } else if(action->node) { node_name = crm_strdup_printf("%s", action->node->details->uname); } if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { task = strdup("Shutdown"); } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { const char *op = g_hash_table_lookup(action->meta, "stonith_action"); task = crm_strdup_printf("Fence (%s)", op); } if(task == NULL) { /* Nothing to report */ } else if(terminal && action->reason) { printf(" * %s %s '%s'\n", task, node_name, action->reason); } else if(terminal) { printf(" * %s %s\n", task, node_name); } else if(action->reason) { crm_notice(" * %s %s '%s'\n", task, node_name, action->reason); } else { crm_notice(" * %s %s\n", task, node_name); } free(node_name); free(task); } } diff --git a/daemons/schedulerd/sched_utils.c b/daemons/schedulerd/sched_utils.c index 153dc27296..613dcf3d00 100644 --- a/daemons/schedulerd/sched_utils.c +++ b/daemons/schedulerd/sched_utils.c @@ -1,444 +1,471 @@ /* - * Copyright 2004-2018 Andrew Beekhof + * Copyright 2004-2019 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include pe__location_t * rsc2node_new(const char *id, pe_resource_t *rsc, int node_weight, const char *discover_mode, pe_node_t *foo_node, pe_working_set_t *data_set) { pe__location_t *new_con = NULL; if (rsc == NULL || id == NULL) { pe_err("Invalid constraint %s for rsc=%p", crm_str(id), rsc); return NULL; } else if (foo_node == NULL) { CRM_CHECK(node_weight == 0, return NULL); } new_con = calloc(1, sizeof(pe__location_t)); if (new_con != NULL) { new_con->id = strdup(id); new_con->rsc_lh = rsc; new_con->node_list_rh = NULL; new_con->role_filter = RSC_ROLE_UNKNOWN; if (discover_mode == NULL || safe_str_eq(discover_mode, "always")) { new_con->discover_mode = pe_discover_always; } else if (safe_str_eq(discover_mode, "never")) { new_con->discover_mode = pe_discover_never; } else if (safe_str_eq(discover_mode, "exclusive")) { new_con->discover_mode = pe_discover_exclusive; rsc->exclusive_discover = TRUE; } else { pe_err("Invalid %s value %s in location constraint", XML_LOCATION_ATTR_DISCOVERY, discover_mode); } if (foo_node != NULL) { node_t *copy = node_copy(foo_node); copy->weight = node_weight; new_con->node_list_rh = g_list_prepend(NULL, copy); } data_set->placement_constraints = g_list_prepend(data_set->placement_constraints, new_con); rsc->rsc_location = g_list_prepend(rsc->rsc_location, new_con); } return new_con; } gboolean can_run_resources(const node_t * node) { if (node == NULL) { return FALSE; } #if 0 if (node->weight < 0) { return FALSE; } #endif if (node->details->online == FALSE || node->details->shutdown || node->details->unclean || node->details->standby || node->details->maintenance) { crm_trace("%s: online=%d, unclean=%d, standby=%d, maintenance=%d", node->details->uname, node->details->online, node->details->unclean, node->details->standby, node->details->maintenance); return FALSE; } return TRUE; } struct node_weight_s { pe_node_t *active; pe_working_set_t *data_set; }; /* return -1 if 'a' is more preferred * return 1 if 'b' is more preferred */ static gint sort_node_weight(gconstpointer a, gconstpointer b, gpointer data) { const node_t *node1 = (const node_t *)a; const node_t *node2 = (const node_t *)b; struct node_weight_s *nw = data; int node1_weight = 0; int node2_weight = 0; int result = 0; if (a == NULL) { return 1; } if (b == NULL) { return -1; } node1_weight = node1->weight; node2_weight = node2->weight; if (can_run_resources(node1) == FALSE) { node1_weight = -INFINITY; } if (can_run_resources(node2) == FALSE) { node2_weight = -INFINITY; } if (node1_weight > node2_weight) { crm_trace("%s (%d) > %s (%d) : weight", node1->details->uname, node1_weight, node2->details->uname, node2_weight); return -1; } if (node1_weight < node2_weight) { crm_trace("%s (%d) < %s (%d) : weight", node1->details->uname, node1_weight, node2->details->uname, node2_weight); return 1; } crm_trace("%s (%d) == %s (%d) : weight", node1->details->uname, node1_weight, node2->details->uname, node2_weight); if (safe_str_eq(nw->data_set->placement_strategy, "minimal")) { goto equal; } if (safe_str_eq(nw->data_set->placement_strategy, "balanced")) { result = compare_capacity(node1, node2); if (result < 0) { crm_trace("%s > %s : capacity (%d)", node1->details->uname, node2->details->uname, result); return -1; } else if (result > 0) { crm_trace("%s < %s : capacity (%d)", node1->details->uname, node2->details->uname, result); return 1; } } /* now try to balance resources across the cluster */ if (node1->details->num_resources < node2->details->num_resources) { crm_trace("%s 
(%d) > %s (%d) : resources", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return -1; } else if (node1->details->num_resources > node2->details->num_resources) { crm_trace("%s (%d) < %s (%d) : resources", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return 1; } if (nw->active && nw->active->details == node1->details) { crm_trace("%s (%d) > %s (%d) : active", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return -1; } else if (nw->active && nw->active->details == node2->details) { crm_trace("%s (%d) < %s (%d) : active", node1->details->uname, node1->details->num_resources, node2->details->uname, node2->details->num_resources); return 1; } equal: crm_trace("%s = %s", node1->details->uname, node2->details->uname); return strcmp(node1->details->uname, node2->details->uname); } GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set) { struct node_weight_s nw = { active_node, data_set }; return g_list_sort_with_data(nodes, sort_node_weight, &nw); } void native_deallocate(resource_t * rsc) { if (rsc->allocated_to) { node_t *old = rsc->allocated_to; crm_info("Deallocating %s from %s", rsc->id, old->details->uname); set_bit(rsc->flags, pe_rsc_provisional); rsc->allocated_to = NULL; old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, rsc); old->details->num_resources--; /* old->count--; */ calculate_utilization(old->details->utilization, rsc->utilization, TRUE); free(old); } } gboolean native_assign_node(resource_t * rsc, GListPtr nodes, node_t * chosen, gboolean force) { CRM_ASSERT(rsc->variant == pe_native); if (force == FALSE && chosen != NULL) { bool unset = FALSE; if(chosen->weight < 0) { unset = TRUE; // Allow the graph to assume that the remote resource will come up } else if(can_run_resources(chosen) == FALSE && !is_container_remote_node(chosen)) { unset = TRUE; } if(unset) { crm_debug("All nodes for resource %s are unavailable" ", unclean or shutting down (%s: %d, %d)", rsc->id, chosen->details->uname, can_run_resources(chosen), chosen->weight); rsc->next_role = RSC_ROLE_STOPPED; chosen = NULL; } } /* todo: update the old node for each resource to reflect its * new resource count */ native_deallocate(rsc); clear_bit(rsc->flags, pe_rsc_provisional); if (chosen == NULL) { GListPtr gIter = NULL; char *rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING); crm_debug("Could not allocate a node for %s", rsc->id); rsc->next_role = RSC_ROLE_STOPPED; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { action_t *op = (action_t *) gIter->data; const char *interval_ms_s = g_hash_table_lookup(op->meta, XML_LRM_ATTR_INTERVAL_MS); crm_debug("Processing %s", op->uuid); if(safe_str_eq(RSC_STOP, op->task)) { update_action_flags(op, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__); } else if(safe_str_eq(RSC_START, op->task)) { update_action_flags(op, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); /* set_bit(rsc->flags, pe_rsc_block); */ } else if (interval_ms_s && safe_str_neq(interval_ms_s, "0")) { if(safe_str_eq(rc_inactive, g_hash_table_lookup(op->meta, XML_ATTR_TE_TARGET_RC))) { /* This is a recurring monitor for the stopped state, leave it alone */ } else { /* Normal monitor operation, cancel it */ update_action_flags(op, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); } } } free(rc_inactive); return FALSE; } 
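    /* A node was chosen: record the assignment and update the chosen node's
     * bookkeeping (allocated resource list, resource count, and utilization).
     */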
crm_debug("Assigning %s to %s", chosen->details->uname, rsc->id); rsc->allocated_to = node_copy(chosen); chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, rsc); chosen->details->num_resources++; chosen->count++; calculate_utilization(chosen->details->utilization, rsc->utilization, FALSE); dump_rsc_utilization(show_utilization ? 0 : utilization_log_level, __FUNCTION__, rsc, chosen); return TRUE; } void log_action(unsigned int log_level, const char *pre_text, action_t * action, gboolean details) { const char *node_uname = NULL; const char *node_uuid = NULL; if (action == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } if (is_set(action->flags, pe_action_pseudo)) { node_uname = NULL; node_uuid = NULL; } else if (action->node != NULL) { node_uname = action->node->details->uname; node_uuid = action->node->details->id; } else { node_uname = ""; node_uuid = NULL; } switch (text2task(action->task)) { case stonith_node: case shutdown_crm: crm_trace("%s%s%sAction %d: %s%s%s%s%s%s", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", is_set(action->flags, pe_action_pseudo) ? "Pseudo " : is_set(action->flags, pe_action_optional) ? "Optional " : is_set(action->flags, pe_action_runnable) ? is_set(action->flags, pe_action_processed) ? "" : "(Provisional) " : "!!Non-Startable!! ", action->id, action->uuid, node_uname ? "\ton " : "", node_uname ? node_uname : "", node_uuid ? "\t\t(" : "", node_uuid ? node_uuid : "", node_uuid ? ")" : ""); break; default: crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", is_set(action->flags, pe_action_optional) ? "Optional " : is_set(action->flags, pe_action_pseudo) ? "Pseudo " : is_set(action->flags, pe_action_runnable) ? is_set(action->flags, pe_action_processed) ? "" : "(Provisional) " : "!!Non-Startable!! ", action->id, action->uuid, action->rsc ? action->rsc->id : "", node_uname ? "\ton " : "", node_uname ? node_uname : "", node_uuid ? "\t\t(" : "", node_uuid ? node_uuid : "", node_uuid ? 
")" : ""); break; } if (details) { GListPtr gIter = NULL; crm_trace("\t\t====== Preceding Actions"); gIter = action->actions_before; for (; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other = (action_wrapper_t *) gIter->data; log_action(log_level + 1, "\t\t", other->action, FALSE); } crm_trace("\t\t====== Subsequent Actions"); gIter = action->actions_after; for (; gIter != NULL; gIter = gIter->next) { action_wrapper_t *other = (action_wrapper_t *) gIter->data; log_action(log_level + 1, "\t\t", other->action, FALSE); } crm_trace("\t\t====== End"); } else { crm_trace("\t\t(before=%d, after=%d)", g_list_length(action->actions_before), g_list_length(action->actions_after)); } } gboolean can_run_any(GHashTable * nodes) { GHashTableIter iter; node_t *node = NULL; if (nodes == NULL) { return FALSE; } g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (can_run_resources(node) && node->weight >= 0) { return TRUE; } } return FALSE; } pe_action_t * create_pseudo_resource_op(resource_t * rsc, const char *task, bool optional, bool runnable, pe_working_set_t *data_set) { pe_action_t *action = custom_action(rsc, generate_op_key(rsc->id, task, 0), task, NULL, optional, TRUE, data_set); update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__); update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); if(runnable) { update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__); } return action; } /*! * \internal * \brief Create an executor cancel op * * \param[in] rsc Resource of action to cancel * \param[in] task Name of action to cancel * \param[in] interval_ms Interval of action to cancel * \param[in] node Node of action to cancel * \param[in] data_set Working set of cluster * * \return Created op */ pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set) { pe_action_t *cancel_op; char *interval_ms_s = crm_strdup_printf("%u", interval_ms); // @TODO dangerous if possible to schedule another action with this key char *key = generate_op_key(rsc->id, task, interval_ms); cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE, data_set); free(cancel_op->task); cancel_op->task = strdup(RSC_CANCEL); free(cancel_op->cancel_task); cancel_op->cancel_task = strdup(task); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, task); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL_MS, interval_ms_s); free(interval_ms_s); return cancel_op; } + +/*! 
+ * \internal + * \brief Create a shutdown op for a scheduler transition + * + * \param[in] node Node being scheduled for shutdown + * \param[in] data_set Working set of cluster + * + * \return Created op + */ +pe_action_t * +sched_shutdown_op(pe_node_t *node, pe_working_set_t *data_set) +{ + char *shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, + node->details->uname); + + pe_action_t *shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, + node, FALSE, TRUE, data_set); + + crm_notice("Scheduling shutdown of node %s", node->details->uname); + shutdown_constraints(node, shutdown_op, data_set); + add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); + return shutdown_op; +} diff --git a/daemons/schedulerd/sched_utils.h b/daemons/schedulerd/sched_utils.h index b049dd7c01..6c3cb460a6 100644 --- a/daemons/schedulerd/sched_utils.h +++ b/daemons/schedulerd/sched_utils.h @@ -1,76 +1,77 @@ /* - * Copyright 2004-2018 Andrew Beekhof + * Copyright 2004-2019 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PENGINE_AUTILS__H # define PENGINE_AUTILS__H /* Constraint helper functions */ extern rsc_colocation_t *invert_constraint(rsc_colocation_t * constraint); pe__location_t *copy_constraint(pe__location_t *constraint); pe__location_t *rsc2node_new(const char *id, pe_resource_t *rsc, int weight, const char *discovery_mode, pe_node_t *node, pe_working_set_t *data_set); extern gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, resource_t * rsc_lh, resource_t * rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t * data_set); extern gboolean rsc_ticket_new(const char *id, resource_t * rsc_lh, ticket_t * ticket, const char *state_lh, const char *loss_policy, pe_working_set_t * data_set); GList *sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set); extern gboolean can_run_resources(const node_t * node); extern gboolean native_assign_node(resource_t * rsc, GListPtr candidates, node_t * chosen, gboolean force); void native_deallocate(resource_t * rsc); extern void log_action(unsigned int log_level, const char *pre_text, action_t * action, gboolean details); gboolean can_run_any(GHashTable * nodes); bool can_interleave_actions(pe_action_t *first, pe_action_t *then); pe_resource_t *find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc, enum rsc_role_e filter, gboolean current, pe_working_set_t *data_set); resource_t *find_compatible_child_by_node(resource_t * local_child, node_t * local_node, resource_t * rsc, enum rsc_role_e filter, gboolean current); gboolean is_child_compatible(resource_t *child_rsc, node_t * local_node, enum rsc_role_e filter, gboolean current); bool assign_node(resource_t * rsc, node_t * node, gboolean force); enum pe_action_flags summary_action_flags(action_t * action, GListPtr children, node_t * node); enum action_tasks clone_child_action(action_t * action); int copies_per_node(resource_t * rsc); enum filter_colocation_res { influence_nothing = 0, influence_rsc_location, influence_rsc_priority, }; extern enum filter_colocation_res filter_colocation_constraint(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint, gboolean preview); extern int compare_capacity(const node_t * node1, const node_t *
node2); extern void calculate_utilization(GHashTable * current_utilization, GHashTable * utilization, gboolean plus); extern void process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_set); pe_action_t *create_pseudo_resource_op(resource_t * rsc, const char *task, bool optional, bool runnable, pe_working_set_t *data_set); pe_action_t *pe_cancel_op(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set); +pe_action_t *sched_shutdown_op(pe_node_t *node, pe_working_set_t *data_set); # define LOAD_STOPPED "load_stopped" #endif diff --git a/doc/Pacemaker_Administration/en-US/Ch-Configuring.txt b/doc/Pacemaker_Administration/en-US/Ch-Configuring.txt index 5ca9dfc32e..286479bbc5 100644 --- a/doc/Pacemaker_Administration/en-US/Ch-Configuring.txt +++ b/doc/Pacemaker_Administration/en-US/Ch-Configuring.txt @@ -1,436 +1,442 @@ :compat-mode: legacy = Configuring Pacemaker = == How Should the Configuration be Updated? == There are three basic rules for updating the cluster configuration: * Rule 1 - Never edit the +cib.xml+ file manually. Ever. I'm not making this up. * Rule 2 - Read Rule 1 again. * Rule 3 - The cluster will notice if you ignored rules 1 & 2 and refuse to use the configuration. Now that it is clear how 'not' to update the configuration, we can begin to explain how you 'should'. === Editing the CIB Using XML === The most powerful tool for modifying the configuration is the +cibadmin+ command. With +cibadmin+, you can query, add, remove, update or replace any part of the configuration. All changes take effect immediately, so there is no need to perform a reload-like operation. The simplest way of using `cibadmin` is to use it to save the current configuration to a temporary file, edit that file with your favorite text or XML editor, and then upload the revised configuration. footnote:[This process might appear to risk overwriting changes that happen after the initial cibadmin call, but pacemaker will reject any update that is "too old". If the CIB is updated in some other fashion after the initial cibadmin, the second cibadmin will be rejected because the version number will be too low.] .Safely using an editor to modify the cluster configuration ====== -------- # cibadmin --query > tmp.xml # vi tmp.xml # cibadmin --replace --xml-file tmp.xml -------- ====== Some of the better XML editors can make use of a Relax NG schema to help make sure any changes you make are valid. The schema describing the configuration can be found in +pacemaker.rng+, which may be deployed in a location such as +/usr/share/pacemaker+ or +/usr/lib/heartbeat+ depending on your operating system and how you installed the software. If you want to modify just one section of the configuration, you can query and replace just that section to avoid modifying any others. .Safely using an editor to modify only the resources section ====== -------- # cibadmin --query --scope resources > tmp.xml # vi tmp.xml # cibadmin --replace --scope resources --xml-file tmp.xml -------- ====== === Quickly Deleting Part of the Configuration === Identify the object you wish to delete by XML tag and id. 
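The deletion itself is done by giving +cibadmin+ an XML snippet containing
nothing more than the tag and id of the object. As a sketch (the tag name and
id below are purely illustrative):

----
# cibadmin --delete --xml-text '<primitive id="some-resource"/>'
----

If you are not sure which tag or id to use, you can query the CIB first.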
For example, you might search the CIB for all STONITH-related configuration: .Searching for STONITH-related configuration items ====== ---- # cibadmin -Q | grep stonith ---- ====== If you wanted to delete the +primitive+ tag with id +child_DoFencing+, you would run: ---- # cibadmin --delete --xml-text '' ---- === Updating the Configuration Without Using XML === Most tasks can be performed with one of the other command-line tools provided with pacemaker, avoiding the need to read or edit XML. To enable STONITH for example, one could run: ---- # crm_attribute --name stonith-enabled --update 1 ---- Or, to check whether *somenode* is allowed to run resources, there is: ---- # crm_standby --query --node somenode ---- Or, to find the current location of *my-test-rsc*, one can use: ---- # crm_resource --locate --resource my-test-rsc ---- Examples of using these tools for specific cases will be given throughout this document where appropriate. [[s-config-sandboxes]] == Making Configuration Changes in a Sandbox == Often it is desirable to preview the effects of a series of changes before updating the configuration all at once. For this purpose, we have created `crm_shadow` which creates a "shadow" copy of the configuration and arranges for all the command line tools to use it. To begin, simply invoke `crm_shadow --create` with the name of a configuration to create footnote:[Shadow copies are identified with a name, making it possible to have more than one.], and follow the simple on-screen instructions. [WARNING] ==== Read this section and the on-screen instructions carefully; failure to do so could result in destroying the cluster's active configuration! ==== .Creating and displaying the active sandbox ====== ---- # crm_shadow --create test Setting up shadow instance Type Ctrl-D to exit the crm_shadow shell shadow[test]: shadow[test] # crm_shadow --which test ---- ====== From this point on, all cluster commands will automatically use the shadow copy instead of talking to the cluster's active configuration. Once you have finished experimenting, you can either make the changes active via the `--commit` option, or discard them using the `--delete` option. Again, be sure to follow the on-screen instructions carefully! For a full list of `crm_shadow` options and commands, invoke it with the `--help` option. .Use sandbox to make multiple changes all at once, discard them, and verify real configuration is untouched ====== ---- shadow[test] # crm_failcount -r rsc_c001n01 -G scope=status name=fail-count-rsc_c001n01 value=0 shadow[test] # crm_standby --node c001n02 -v on shadow[test] # crm_standby --node c001n02 -G scope=nodes name=standby value=on shadow[test] # cibadmin --erase --force shadow[test] # cibadmin --query shadow[test] # crm_shadow --delete test --force Now type Ctrl-D to exit the crm_shadow shell shadow[test] # exit # crm_shadow --which No active shadow configuration defined # cibadmin -Q ---- ====== [[s-config-testing-changes]] == Testing Your Configuration Changes == We saw previously how to make a series of changes to a "shadow" copy of the configuration. Before loading the changes back into the cluster (e.g. `crm_shadow --commit mytest --force`), it is often advisable to simulate the effect of the changes with +crm_simulate+. For example: ---- # crm_simulate --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot ---- This tool uses the same library as the live cluster to show what it would have done given the supplied input. 
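If you are working in a shadow copy, or have saved the CIB to a file, the same
check can be run against that file rather than the live cluster. A possible
invocation (the file name is illustrative) would be:

----
# crm_simulate --xml-file tmp.xml --save-graph tmp.graph --save-dotfile tmp.dot
----

Either way, the results are interpreted in the same manner.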
Its output, in addition to a significant amount of logging, is stored in two files +tmp.graph+ and +tmp.dot+. Both files are representations of the same thing: the cluster's response to your changes. The graph file stores the complete transition from the existing cluster state to your desired new state, containing a list of all the actions, their parameters and their pre-requisites. Because the transition graph is not terribly easy to read, the tool also generates a Graphviz footnote:[Graph visualization software. See http://www.graphviz.org/ for details.] dot-file representing the same information. For information on the options supported by `crm_simulate`, use its `--help` option. .Interpreting the Graphviz output * Arrows indicate ordering dependencies * Dashed arrows indicate dependencies that are not present in the transition graph * Actions with a dashed border of any color do not form part of the transition graph * Actions with a green border form part of the transition graph * Actions with a red border are ones the cluster would like to execute but cannot run * Actions with a blue border are ones the cluster does not feel need to be executed * Actions with orange text are pseudo/pretend actions that the cluster uses to simplify the graph * Actions with black text are sent to the LRM * Resource actions have text of the form pass:[rsc]_pass:[action]_pass:[interval] pass:[node] * Any action depending on an action with a red border will not be able to execute. * Loops are _really_ bad. Please report them to the development team. === Small Cluster Transition === image::images/Policy-Engine-small.png["An example transition graph as represented by Graphviz",width="16cm",height="6cm",align="center"] In the above example, it appears that a new node, *pcmk-2*, has come online and that the cluster is checking to make sure *rsc1*, *rsc2* and *rsc3* are not already running there (Indicated by the *rscN_monitor_0* entries). Once it did that, and assuming the resources were not active there, it would have liked to stop *rsc1* and *rsc2* on *pcmk-1* and move them to *pcmk-2*. However, there appears to be some problem and the cluster cannot or is not permitted to perform the stop actions which implies it also cannot perform the start actions. For some reason the cluster does not want to start *rsc3* anywhere. === Complex Cluster Transition === image::images/Policy-Engine-big.png["Another, slightly more complex, transition graph that you're not expected to be able to read",width="16cm",height="20cm",align="center"] == Do I Need to Update the Configuration on All Cluster Nodes? == No. Any changes are immediately synchronized to the other active members of the cluster. To reduce bandwidth, the cluster only broadcasts the incremental updates that result from your changes and uses MD5 checksums to ensure that each copy is completely consistent. == Working with CIB Properties == Although these fields can be written to by the user, in most cases the cluster will overwrite any values specified by the user with the "correct" ones. 
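The fields in question are the attributes of the +cib+ element itself. As a
rough illustration (the values shown here are made up), they look like this:

[source,XML]
-------
<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" admin_epoch="1"
     epoch="12" num_updates="65" have-quorum="1" dc-uuid="1"/>
-------

Most of these, such as +epoch+ and +num_updates+, are maintained by the
cluster itself.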
To change the ones that can be specified by the user, for example +admin_epoch+, one should use: ---- # cibadmin --modify --xml-text '' ---- A complete set of CIB properties will look something like this: .Attributes set for a cib object ====== [source,XML] ------- ------- ====== == Querying and Setting Cluster Options == indexterm:[Querying,Cluster Option] indexterm:[Setting,Cluster Option] indexterm:[Cluster,Querying Options] indexterm:[Cluster,Setting Options] Cluster options can be queried and modified using the `crm_attribute` tool. To get the current value of +cluster-delay+, you can run: ---- # crm_attribute --query --name cluster-delay ---- which is more simply written as ---- # crm_attribute -G -n cluster-delay ---- If a value is found, you'll see a result like this: ---- # crm_attribute -G -n cluster-delay scope=crm_config name=cluster-delay value=60s ---- If no value is found, the tool will display an error: ---- # crm_attribute -G -n clusta-deway scope=crm_config name=clusta-deway value=(null) Error performing operation: No such device or address ---- To use a different value (for example, 30 seconds), simply run: ---- # crm_attribute --name cluster-delay --update 30s ---- To go back to the cluster's default value, you can delete the value, for example: ---- # crm_attribute --name cluster-delay --delete Deleted crm_config option: id=cib-bootstrap-options-cluster-delay name=cluster-delay ---- === When Options are Listed More Than Once === If you ever see something like the following, it means that the option you're modifying is present more than once. .Deleting an option that is listed twice ======= ------ # crm_attribute --name batch-limit --delete Multiple attributes match name=batch-limit in crm_config: Value: 50 (set=cib-bootstrap-options, id=cib-bootstrap-options-batch-limit) Value: 100 (set=custom, id=custom-batch-limit) Please choose from one of the matches above and supply the 'id' with --id ------- ======= In such cases, follow the on-screen instructions to perform the requested action. To determine which value is currently being used by the cluster, refer to the 'Rules' chapter of 'Pacemaker Explained'. [[s-remote-connection]] == Connecting from a Remote Machine == indexterm:[Cluster,Remote connection] indexterm:[Cluster,Remote administration] Provided Pacemaker is installed on a machine, it is possible to connect to the cluster even if the machine itself is not in the same cluster. To do this, one simply sets up a number of environment variables and runs the same commands as when working on a cluster node. .Environment Variables Used to Connect to Remote Instances of the CIB [width="95%",cols="1m,1,<3",options="header",align="center"] |========================================================= |Environment Variable |Default |Description |CIB_user |$USER |The user to connect as. Needs to be part of the +haclient+ group on the target host. indexterm:[Environment Variable,CIB_user] |CIB_passwd | |The user's password. Read from the command line if unset. indexterm:[Environment Variable,CIB_passwd] |CIB_server |localhost |The host to contact indexterm:[Environment Variable,CIB_server] |CIB_port | |The port on which to contact the server; required. 
indexterm:[Environment Variable,CIB_port] |CIB_encrypted |TRUE |Whether to encrypt network traffic indexterm:[Environment Variable,CIB_encrypted] |========================================================= So, if *c001n01* is an active cluster node and is listening on port 1234 for connections, and *someuser* is a member of the *haclient* group, then the following would prompt for *someuser*'s password and return the cluster's current configuration: ---- # export CIB_port=1234; export CIB_server=c001n01; export CIB_user=someuser; # cibadmin -Q ---- For security reasons, the cluster does not listen for remote connections by default. If you wish to allow remote access, you need to set the +remote-tls-port+ (encrypted) or +remote-clear-port+ (unencrypted) CIB properties (i.e., those kept in the +cib+ tag, like +num_updates+ and +epoch+). .Extra top-level CIB properties for remote access [width="95%",cols="1m,1,<3",options="header",align="center"] |========================================================= |Field |Default |Description |remote-tls-port |_none_ |Listen for encrypted remote connections on this port. indexterm:[remote-tls-port,Remote Connection Option] indexterm:[Remote Connection,Option,remote-tls-port] |remote-clear-port |_none_ |Listen for plaintext remote connections on this port. indexterm:[remote-clear-port,Remote Connection Option] indexterm:[Remote Connection,Option,remote-clear-port] |========================================================= +[IMPORTANT] +==== +The Pacemaker version on the administration host must be the same or greater +than the version(s) on the cluster nodes. Otherwise, it may not have the schema +files necessary to validate the CIB. +==== diff --git a/include/crm/stonith-ng.h b/include/crm/stonith-ng.h index 62a72d9d6e..df99b550f8 100644 --- a/include/crm/stonith-ng.h +++ b/include/crm/stonith-ng.h @@ -1,528 +1,527 @@ /* - * Copyright 2004-2018 Andrew Beekhof + * Copyright 2004-2019 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Fencing aka. STONITH * \ingroup fencing */ #ifndef STONITH_NG__H # define STONITH_NG__H # include # include -# include - -/* TO-DO: Work out how to drop this requirement */ -# include +# include // bool +# include // uint32_t +# include // time_t # define T_STONITH_NOTIFY_DISCONNECT "st_notify_disconnect" # define T_STONITH_NOTIFY_FENCE "st_notify_fence" # define T_STONITH_NOTIFY_HISTORY "st_notify_history" /* *INDENT-OFF* */ enum stonith_state { stonith_connected_command, stonith_connected_query, stonith_disconnected, }; enum stonith_call_options { st_opt_none = 0x00000000, st_opt_verbose = 0x00000001, st_opt_allow_suicide = 0x00000002, st_opt_manual_ack = 0x00000008, st_opt_discard_reply = 0x00000010, /* st_opt_all_replies = 0x00000020, */ st_opt_topology = 0x00000040, st_opt_scope_local = 0x00000100, st_opt_cs_nodeid = 0x00000200, st_opt_sync_call = 0x00001000, /*! Allow the timeout period for a callback to be adjusted * based on the time the server reports the operation will take. */ st_opt_timeout_updates = 0x00002000, /*! Only report back if operation is a success in callback */ st_opt_report_only_success = 0x00004000, /* used where ever apropriate - e.g. cleanup of history */ st_opt_cleanup = 0x000080000, /* used where ever apropriate - e.g. send out a history query to all nodes */ st_opt_broadcast = 0x000100000, }; /*! 
Order matters here, do not change values */ enum op_state { st_query, st_exec, st_done, st_duplicate, st_failed, }; // Supported fence agent interface standards enum stonith_namespace { st_namespace_invalid, st_namespace_any, st_namespace_internal, // Implemented internally by Pacemaker /* Neither of these projects are active any longer, but the fence agent * interfaces they created are still in use and supported by Pacemaker. */ st_namespace_rhcs, // Red Hat Cluster Suite compatible st_namespace_lha, // Linux-HA compatible }; enum stonith_namespace stonith_text2namespace(const char *namespace_s); const char *stonith_namespace2text(enum stonith_namespace st_namespace); enum stonith_namespace stonith_get_namespace(const char *agent, const char *namespace_s); typedef struct stonith_key_value_s { char *key; char *value; struct stonith_key_value_s *next; } stonith_key_value_t; typedef struct stonith_history_s { char *target; char *action; char *origin; char *delegate; char *client; int state; time_t completed; struct stonith_history_s *next; } stonith_history_t; typedef struct stonith_s stonith_t; typedef struct stonith_event_s { char *id; char *type; char *message; char *operation; int result; char *origin; char *target; char *action; char *executioner; char *device; /*! The name of the client that initiated the action. */ char *client_origin; } stonith_event_t; typedef struct stonith_callback_data_s { int rc; int call_id; void *userdata; } stonith_callback_data_t; typedef struct stonith_api_operations_s { /*! * \brief Destroy the stonith api structure. */ int (*free) (stonith_t *st); /*! * \brief Connect to the local stonith daemon. * * \retval 0, success * \retval negative error code on failure */ int (*connect) (stonith_t *st, const char *name, int *stonith_fd); /*! * \brief Disconnect from the local stonith daemon. * * \retval 0, success * \retval negative error code on failure */ int (*disconnect)(stonith_t *st); /*! * \brief Remove a registered stonith device with the local stonith daemon. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*remove_device)( stonith_t *st, int options, const char *name); /*! * \brief Register a stonith device with the local stonith daemon. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*register_device)( stonith_t *st, int options, const char *id, const char *provider, const char *agent, stonith_key_value_t *params); /*! * \brief Remove a fencing level for a specific node. * * \note This feature is not available when stonith is in standalone mode. * * \retval 0, success * \retval negative error code on failure */ int (*remove_level)( stonith_t *st, int options, const char *node, int level); /*! * \brief Register a fencing level containing the fencing devices to be used * at that level for a specific node. * * \note This feature is not available when stonith is in standalone mode. * * \retval 0, success * \retval negative error code on failure */ int (*register_level)( stonith_t *st, int options, const char *node, int level, stonith_key_value_t *device_list); /*! * \brief Get the metadata documentation for a resource. * * \note Value is returned in output. Output must be freed when set. 
* * \retval 0 success * \retval negative error code on failure */ int (*metadata)(stonith_t *st, int options, const char *device, const char *provider, char **output, int timeout); /*! * \brief Retrieve a list of installed stonith agents * * \note if provider is not provided, all known agents will be returned * \note list must be freed using stonith_key_value_freeall() * \note call_options parameter is not used, it is reserved for future use. * * \retval num items in list on success * \retval negative error code on failure */ int (*list_agents)(stonith_t *stonith, int call_options, const char *provider, stonith_key_value_t **devices, int timeout); /*! * \brief Retrieve string listing hosts and port assignments from a local stonith device. * * \retval 0 on success * \retval negative error code on failure */ int (*list)(stonith_t *st, int options, const char *id, char **list_output, int timeout); /*! * \brief Check to see if a local stonith device is reachable * * \retval 0 on success * \retval negative error code on failure */ int (*monitor)(stonith_t *st, int options, const char *id, int timeout); /*! * \brief Check to see if a local stonith device's port is reachable * * \retval 0 on success * \retval negative error code on failure */ int (*status)(stonith_t *st, int options, const char *id, const char *port, int timeout); /*! * \brief Retrieve a list of registered stonith devices. * * \note If node is provided, only devices that can fence the node id * will be returned. * * \retval num items in list on success * \retval negative error code on failure */ int (*query)(stonith_t *st, int options, const char *node, stonith_key_value_t **devices, int timeout); /*! * \brief Issue a fencing action against a node. * * \note Possible actions are, 'on', 'off', and 'reboot'. * * \param st, stonith connection * \param options, call options * \param node, The target node to fence * \param action, The fencing action to take * \param timeout, The default per device timeout to use with each device * capable of fencing the target. * * \retval 0 success * \retval negative error code on failure. */ int (*fence)(stonith_t *st, int options, const char *node, const char *action, int timeout, int tolerance); /*! * \brief Manually confirm that a node is down. * * \retval 0 success * \retval negative error code on failure. */ int (*confirm)(stonith_t *st, int options, const char *node); /*! * \brief Retrieve a list of fencing operations that have occurred for a specific node. * * \note History is not available in standalone mode. * * \retval 0 success * \retval negative error code on failure. */ int (*history)(stonith_t *st, int options, const char *node, stonith_history_t **output, int timeout); int (*register_notification)( stonith_t *st, const char *event, void (*notify)(stonith_t *st, stonith_event_t *e)); int (*remove_notification)(stonith_t *st, const char *event); /*! * \brief Register a callback to receive the result of an async call id * * \param call_id, The call id to register the callback for. * \param timeout, The default timeout period to wait until this callback expires * \param options, Option flags, st_opt_timeout_updates and st_opt_report_only_success are the * only valid options for this function. * \param userdate, A pointer that will be handed back in the callback. * \param callback_name, Unique name given to callback * \param callback, The callback function * * \retval 0 success * \retval negative error code on failure. 
*/ int (*register_callback)(stonith_t *st, int call_id, int timeout, int options, void *userdata, const char *callback_name, void (*callback)(stonith_t *st, stonith_callback_data_t *data)); /*! * \brief Remove a registered callback for a given call id. */ int (*remove_callback)(stonith_t *st, int call_id, bool all_callbacks); /*! * \brief Remove fencing level for specific node, node regex or attribute * * \param[in] st Fencer connection to use * \param[in] options Bitmask of stonith_call_options to pass to the fencer * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to remove * * \return 0 on success, negative error code otherwise * * \note This feature is not available when stonith is in standalone mode. * The caller should set only one of node, pattern or attr/value. */ int (*remove_level_full)(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level); /*! * \brief Register fencing level for specific node, node regex or attribute * * \param[in] st Fencer connection to use * \param[in] options Bitmask of stonith_call_options to pass to fencer * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to add * \param[in] device_list Devices to use in level * * \return 0 on success, negative error code otherwise * * \note This feature is not available when stonith is in standalone mode. * The caller should set only one of node, pattern or attr/value. */ int (*register_level_full)(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list); /*! * \brief Validate an arbitrary stonith device configuration * * \param[in] st Stonithd connection to use * \param[in] call_options Bitmask of stonith_call_options to use with fencer * \param[in] rsc_id ID used to replace CIB secrets in params * \param[in] namespace_s Namespace of fence agent to validate (optional) * \param[in] agent Fence agent to validate * \param[in] params Configuration parameters to pass to fence agent * \param[in] timeout Fail if no response within this many seconds * \param[out] output If non-NULL, where to store any agent output * \param[out] error_output If non-NULL, where to store agent error output * * \return pcmk_ok if validation succeeds, -errno otherwise * * \note If pcmk_ok is returned, the caller is responsible for freeing * the output (if requested). 
*/ int (*validate)(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, stonith_key_value_t *params, int timeout, char **output, char **error_output); } stonith_api_operations_t; struct stonith_s { enum stonith_state state; int call_id; int call_timeout; void *st_private; stonith_api_operations_t *cmds; }; /* *INDENT-ON* */ /* Core functions */ stonith_t *stonith_api_new(void); void stonith_api_delete(stonith_t * st); void stonith_dump_pending_callbacks(stonith_t * st); // deprecated (use stonith_get_namespace() instead) const char *get_stonith_provider(const char *agent, const char *provider); bool stonith_dispatch(stonith_t * st); stonith_key_value_t *stonith_key_value_add(stonith_key_value_t * kvp, const char *key, const char *value); void stonith_key_value_freeall(stonith_key_value_t * kvp, int keys, int values); void stonith_history_free(stonith_history_t *history); /* Basic helpers that allows nodes to be fenced and the history to be * queried without mainloop or the caller understanding the full API * * At least one of nodeid and uname are required */ int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off); time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress); /* * Helpers for using the above functions without install-time dependencies * * Usage: * #include * * To turn a node off by corosync nodeid: * stonith_api_kick_helper(nodeid, 120, 1); * * To check the last fence date/time (also by nodeid): * last = stonith_api_time_helper(nodeid, 0); * * To check if fencing is in progress: * if(stonith_api_time_helper(nodeid, 1) > 0) { ... } * * eg. #include #include #include int main(int argc, char ** argv) { int rc = 0; int nodeid = 102; rc = stonith_api_time_helper(nodeid, 0); printf("%d last fenced at %s\n", nodeid, ctime(rc)); rc = stonith_api_kick_helper(nodeid, 120, 1); printf("%d fence result: %d\n", nodeid, rc); rc = stonith_api_time_helper(nodeid, 0); printf("%d last fenced at %s\n", nodeid, ctime(rc)); return 0; } */ # define STONITH_LIBRARY "libstonithd.so.26" typedef int (*st_api_kick_fn) (int nodeid, const char *uname, int timeout, bool off); typedef time_t (*st_api_time_fn) (int nodeid, const char *uname, bool in_progress); static inline int stonith_api_kick_helper(uint32_t nodeid, int timeout, bool off) { static void *st_library = NULL; static st_api_kick_fn st_kick_fn; if (st_library == NULL) { st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY); } if (st_library && st_kick_fn == NULL) { st_kick_fn = (st_api_kick_fn) dlsym(st_library, "stonith_api_kick"); } if (st_kick_fn == NULL) { #ifdef ELIBACC return -ELIBACC; #else return -ENOSYS; #endif } return (*st_kick_fn) (nodeid, NULL, timeout, off); } static inline time_t stonith_api_time_helper(uint32_t nodeid, bool in_progress) { static void *st_library = NULL; static st_api_time_fn st_time_fn; if (st_library == NULL) { st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY); } if (st_library && st_time_fn == NULL) { st_time_fn = (st_api_time_fn) dlsym(st_library, "stonith_api_time"); } if (st_time_fn == NULL) { return 0; } return (*st_time_fn) (nodeid, NULL, in_progress); } #ifdef __cplusplus } #endif #endif diff --git a/lib/services/services.c b/lib/services/services.c index debe9eccfa..20e824fb54 100644 --- a/lib/services/services.c +++ b/lib/services/services.c @@ -1,1081 +1,1088 @@ /* - * Copyright 2010-2018 Andrew Beekhof + * Copyright 2010-2019 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * 
version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include "services_private.h" #include "services_lsb.h" #if SUPPORT_UPSTART # include #endif #if SUPPORT_SYSTEMD # include #endif /* TODO: Develop a rollover strategy */ static int operations = 0; static GHashTable *recurring_actions = NULL; /* ops waiting to run async because of conflicting active * pending ops */ static GList *blocked_ops = NULL; /* ops currently active (in-flight) */ static GList *inflight_ops = NULL; static void handle_blocked_ops(void); /*! * \brief Find first service class that can provide a specified agent * * \param[in] agent Name of agent to search for * * \return Service class if found, NULL otherwise * * \note The priority is LSB, then systemd, then upstart. It would be preferable * to put systemd first, but LSB merely requires a file existence check, * while systemd requires contacting D-Bus. */ const char * resources_find_service_class(const char *agent) { if (services__lsb_agent_exists(agent)) { return PCMK_RESOURCE_CLASS_LSB; } #if SUPPORT_SYSTEMD if (systemd_unit_exists(agent)) { return PCMK_RESOURCE_CLASS_SYSTEMD; } #endif #if SUPPORT_UPSTART if (upstart_job_exists(agent)) { return PCMK_RESOURCE_CLASS_UPSTART; } #endif return NULL; } static inline void init_recurring_actions(void) { if (recurring_actions == NULL) { recurring_actions = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, NULL); } } /*! * \internal * \brief Check whether op is in-flight systemd or upstart op * * \param[in] op Operation to check * * \return TRUE if op is in-flight systemd or upstart op */ static inline gboolean inflight_systemd_or_upstart(svc_action_t *op) { return (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD) || safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_UPSTART)) && (g_list_find(inflight_ops, op) != NULL); } /*! * \internal * \brief Expand "service" alias to an actual resource class * * \param[in] rsc Resource name (for logging only) * \param[in] standard Resource class as configured * \param[in] agent Agent name to look for * * \return Newly allocated string with actual resource class * * \note The caller is responsible for calling free() on the result. */ static char * expand_resource_class(const char *rsc, const char *standard, const char *agent) { char *expanded_class = NULL; if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0) { const char *found_class = resources_find_service_class(agent); if (found_class) { crm_debug("Found %s agent %s for %s", found_class, agent, rsc); expanded_class = strdup(found_class); } else { crm_info("Assuming resource class lsb for agent %s for %s", agent, rsc); expanded_class = strdup(PCMK_RESOURCE_CLASS_LSB); } } else { expanded_class = strdup(standard); } CRM_ASSERT(expanded_class); return expanded_class; } /*! * \brief Duplicate a file path, inserting a prefix if not absolute * * \param[in] filename File path to duplicate * \param[in] dirname If filename is not absolute, prefix to add * * \return Newly allocated memory with full path */ static char * dup_file_path(const char *filename, const char *dirname) { return (*filename == '/')? 
strdup(filename) : crm_strdup_printf("%s/%s", dirname, filename); } svc_action_t * resources_action_create(const char *name, const char *standard, const char *provider, const char *agent, const char *action, guint interval_ms, int timeout, GHashTable *params, enum svc_action_flags flags) { svc_action_t *op = NULL; uint32_t ra_caps = 0; /* * Do some up front sanity checks before we go off and * build the svc_action_t instance. */ if (crm_strlen_zero(name)) { crm_err("Cannot create operation without resource name"); goto return_error; } if (crm_strlen_zero(standard)) { crm_err("Cannot create operation for %s without resource class", name); goto return_error; } ra_caps = pcmk_get_ra_caps(standard); if (is_set(ra_caps, pcmk_ra_cap_provider) && crm_strlen_zero(provider)) { crm_err("Cannot create operation for %s without provider", name); goto return_error; } if (crm_strlen_zero(agent)) { crm_err("Cannot create operation for %s without agent name", name); goto return_error; } if (crm_strlen_zero(action)) { crm_err("Cannot create operation for %s without operation name", name); goto return_error; } /* * Sanity checks passed, proceed! */ op = calloc(1, sizeof(svc_action_t)); op->opaque = calloc(1, sizeof(svc_action_private_t)); op->rsc = strdup(name); op->interval_ms = interval_ms; op->timeout = timeout; op->standard = expand_resource_class(name, standard, agent); op->agent = strdup(agent); op->sequence = ++operations; op->flags = flags; op->id = generate_op_key(name, action, interval_ms); if (is_set(ra_caps, pcmk_ra_cap_status) && safe_str_eq(action, "monitor")) { op->action = strdup("status"); } else { op->action = strdup(action); } if (is_set(ra_caps, pcmk_ra_cap_provider)) { op->provider = strdup(provider); } if (is_set(ra_caps, pcmk_ra_cap_params)) { op->params = params; params = NULL; // so we don't free them in this function } if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_OCF) == 0) { op->opaque->exec = crm_strdup_printf("%s/resource.d/%s/%s", OCF_ROOT_DIR, provider, agent); op->opaque->args[0] = strdup(op->opaque->exec); op->opaque->args[1] = strdup(op->action); } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_LSB) == 0) { op->opaque->exec = services__lsb_agent_path(op->agent); op->opaque->args[0] = strdup(op->opaque->exec); op->opaque->args[1] = strdup(op->action); #if SUPPORT_SYSTEMD } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { op->opaque->exec = strdup("systemd-dbus"); #endif #if SUPPORT_UPSTART } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_UPSTART) == 0) { op->opaque->exec = strdup("upstart-dbus"); #endif #if SUPPORT_NAGIOS } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_NAGIOS) == 0) { op->opaque->exec = dup_file_path(op->agent, NAGIOS_PLUGIN_DIR); op->opaque->args[0] = strdup(op->opaque->exec); if (safe_str_eq(op->action, "monitor") && (op->interval_ms == 0)) { /* Invoke --version for a nagios probe */ op->opaque->args[1] = strdup("--version"); } else if (op->params) { GHashTableIter iter; char *key = NULL; char *value = NULL; int index = 1; static int args_size = sizeof(op->opaque->args) / sizeof(char *); g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value) && index <= args_size - 3) { if (safe_str_eq(key, XML_ATTR_CRM_VERSION) || strstr(key, CRM_META "_")) { continue; } op->opaque->args[index++] = crm_strdup_printf("--%s", key); op->opaque->args[index++] = strdup(value); } } // Nagios actions don't need to keep the parameters if (op->params != NULL) { 
g_hash_table_destroy(op->params); op->params = NULL; } #endif } else { crm_err("Unknown resource standard: %s", op->standard); goto return_error; } if(params) { g_hash_table_destroy(params); } return op; return_error: if(params) { g_hash_table_destroy(params); } services_action_free(op); return NULL; } svc_action_t * services_action_create_generic(const char *exec, const char *args[]) { svc_action_t *op; unsigned int cur_arg; op = calloc(1, sizeof(*op)); op->opaque = calloc(1, sizeof(svc_action_private_t)); op->opaque->exec = strdup(exec); op->opaque->args[0] = strdup(exec); for (cur_arg = 1; args && args[cur_arg - 1]; cur_arg++) { op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]); if (cur_arg == DIMOF(op->opaque->args) - 1) { crm_err("svc_action_t args list not long enough for '%s' execution request.", exec); break; } } return op; } /*! * \brief Create an alert agent action * * \param[in] id Alert ID * \param[in] exec Path to alert agent executable * \param[in] timeout Action timeout * \param[in] params Parameters to use with action * \param[in] sequence Action sequence number * \param[in] cb_data Data to pass to callback function * * \return New action on success, NULL on error * \note It is the caller's responsibility to free cb_data. * The caller should not free params explicitly. */ svc_action_t * services_alert_create(const char *id, const char *exec, int timeout, GHashTable *params, int sequence, void *cb_data) { svc_action_t *action = services_action_create_generic(exec, NULL); CRM_ASSERT(action); action->timeout = timeout; action->id = strdup(id); action->params = params; action->sequence = sequence; action->cb_data = cb_data; return action; } /*! * \brief Set the user and group that an action will execute as * * \param[in,out] action Action to modify * \param[in] user Name of user to execute action as * \param[in] group Name of group to execute action as * * \return pcmk_ok on success, -errno otherwise * * \note This will have no effect unless the process executing the action runs * as root, and the action is not a systemd or upstart action. * We could implement this for systemd by adding User= and Group= to * [Service] in the override file, but that seems more likely to cause * problems than be useful. */ int services_action_user(svc_action_t *op, const char *user) { CRM_CHECK((op != NULL) && (user != NULL), return -EINVAL); return crm_user_lookup(user, &(op->opaque->uid), &(op->opaque->gid)); } static void set_alert_env(gpointer key, gpointer value, gpointer user_data) { int rc; if (value) { rc = setenv(key, value, 1); } else { rc = unsetenv(key); } if (rc < 0) { crm_perror(LOG_ERR, "setenv %s=%s", (char*)key, (value? (char*)value : "")); } else { crm_trace("setenv %s=%s", (char*)key, (value? (char*)value : "")); } } static void unset_alert_env(gpointer key, gpointer value, gpointer user_data) { if (unsetenv(key) < 0) { crm_perror(LOG_ERR, "unset %s", (char*)key); } else { crm_trace("unset %s", (char*)key); } } /*! * \brief Execute an alert agent action * * \param[in] action Action to execute * \param[in] cb Function to call when action completes * * \return TRUE if the library will free action, FALSE otherwise * * \note If this function returns FALSE, it is the caller's responsibility to * free the action with services_action_free(). 
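As a hedged illustration of the ownership rule described above (not part of this patch; the alert id, agent path, timeout and callback names are made up), a caller frees the action only when services_alert_async(), defined just below, declines responsibility by returning FALSE:

static void
example_alert_done(svc_action_t *op)
{
    crm_info("Alert %s finished with rc=%d", op->id, op->rc);
}

static void
example_send_alert(GHashTable *env_params)
{
    /* Hypothetical alert id and agent path; 30000ms timeout, sequence 1,
     * no callback data.  env_params (may be NULL) is exported to the agent's
     * environment via set_alert_env() and is owned by the action afterwards.
     */
    svc_action_t *alert = services_alert_create("example-alert",
                                                "/path/to/alert-agent.sh",
                                                30000, env_params, 1, NULL);

    if (services_alert_async(alert, example_alert_done) == FALSE) {
        /* The library did not take ownership, so clean up here */
        services_action_free(alert);
    }
}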
*/ gboolean services_alert_async(svc_action_t *action, void (*cb)(svc_action_t *op)) { gboolean responsible; action->synchronous = false; action->opaque->callback = cb; if (action->params) { g_hash_table_foreach(action->params, set_alert_env, NULL); } responsible = services_os_action_execute(action); if (action->params) { g_hash_table_foreach(action->params, unset_alert_env, NULL); } return responsible; } #if SUPPORT_DBUS /*! * \internal * \brief Update operation's pending DBus call, unreferencing old one if needed * * \param[in,out] op Operation to modify * \param[in] pending Pending call to set */ void services_set_op_pending(svc_action_t *op, DBusPendingCall *pending) { if (op->opaque->pending && (op->opaque->pending != pending)) { if (pending) { crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending); } else { crm_trace("Done with pending %s DBus call (%p)", op->id, op->opaque->pending); } dbus_pending_call_unref(op->opaque->pending); } op->opaque->pending = pending; if (pending) { crm_trace("Updated pending %s DBus call (%p)", op->id, pending); } else { crm_trace("Cleared pending %s DBus call", op->id); } } #endif void services_action_cleanup(svc_action_t * op) { - if(op->opaque == NULL) { + if ((op == NULL) || (op->opaque == NULL)) { return; } #if SUPPORT_DBUS if(op->opaque->timerid != 0) { crm_trace("Removing timer for call %s to %s", op->action, op->rsc); g_source_remove(op->opaque->timerid); op->opaque->timerid = 0; } if(op->opaque->pending) { - crm_trace("Cleaning up pending dbus call %p %s for %s", op->opaque->pending, op->action, op->rsc); - if(dbus_pending_call_get_completed(op->opaque->pending)) { - crm_warn("Pending dbus call %s for %s did not complete", op->action, op->rsc); + if (dbus_pending_call_get_completed(op->opaque->pending)) { + // This should never be the case + crm_warn("Result of %s op %s was unhandled", + op->standard, op->id); + } else { + crm_debug("Will ignore any result of canceled %s op %s", + op->standard, op->id); } dbus_pending_call_cancel(op->opaque->pending); - dbus_pending_call_unref(op->opaque->pending); - op->opaque->pending = NULL; + services_set_op_pending(op, NULL); } #endif if (op->opaque->stderr_gsource) { mainloop_del_fd(op->opaque->stderr_gsource); op->opaque->stderr_gsource = NULL; } if (op->opaque->stdout_gsource) { mainloop_del_fd(op->opaque->stdout_gsource); op->opaque->stdout_gsource = NULL; } } void services_action_free(svc_action_t * op) { unsigned int i; if (op == NULL) { return; } /* The operation should be removed from all tracking lists by this point. * If it's not, we have a bug somewhere, so bail. That may lead to a * memory leak, but it's better than a use-after-free segmentation fault. 
*/ CRM_CHECK(g_list_find(inflight_ops, op) == NULL, return); CRM_CHECK(g_list_find(blocked_ops, op) == NULL, return); CRM_CHECK((recurring_actions == NULL) || (g_hash_table_lookup(recurring_actions, op->id) == NULL), return); services_action_cleanup(op); if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } free(op->id); free(op->opaque->exec); for (i = 0; i < DIMOF(op->opaque->args); i++) { free(op->opaque->args[i]); } free(op->opaque); free(op->rsc); free(op->action); free(op->standard); free(op->agent); free(op->provider); free(op->stdout_data); free(op->stderr_data); if (op->params) { g_hash_table_destroy(op->params); op->params = NULL; } free(op); } gboolean cancel_recurring_action(svc_action_t * op) { crm_info("Cancelling %s operation %s", op->standard, op->id); if (recurring_actions) { g_hash_table_remove(recurring_actions, op->id); } if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } return TRUE; } /*! * \brief Cancel a recurring action * * \param[in] name Name of resource that operation is for * \param[in] action Name of operation to cancel * \param[in] interval_ms Interval of operation to cancel * * \return TRUE if action was successfully cancelled, FALSE otherwise */ gboolean services_action_cancel(const char *name, const char *action, guint interval_ms) { gboolean cancelled = FALSE; char *id = generate_op_key(name, action, interval_ms); svc_action_t *op = NULL; /* We can only cancel a recurring action */ init_recurring_actions(); op = g_hash_table_lookup(recurring_actions, id); if (op == NULL) { goto done; } /* Tell operation_finalize() not to reschedule the operation */ op->cancel = TRUE; - /* Stop tracking it as a recurring operation, and stop its timer */ + /* Stop tracking it as a recurring operation, and stop its repeat timer */ cancel_recurring_action(op); /* If the op has a PID, it's an in-flight child process, so kill it. * * Whether the kill succeeds or fails, the main loop will send the op to * operation_finished() (and thus operation_finalize()) when the process * goes away. */ if (op->pid != 0) { crm_info("Terminating in-flight op %s (pid %d) early because it was cancelled", id, op->pid); cancelled = mainloop_child_kill(op->pid); if (cancelled == FALSE) { crm_err("Termination of %s (pid %d) failed", id, op->pid); } goto done; } - /* In-flight systemd and upstart ops don't have a pid. The relevant handlers - * will call operation_finalize() when the operation completes. - * @TODO: Can we request early termination, maybe using - * dbus_pending_call_cancel()? - */ +#if SUPPORT_DBUS + // In-flight systemd and upstart ops don't have a pid if (inflight_systemd_or_upstart(op)) { - crm_info("Will cancel %s op %s when in-flight instance completes", - op->standard, op->id); - cancelled = FALSE; - goto done; + inflight_ops = g_list_remove(inflight_ops, op); + + /* This will cause any result that comes in later to be discarded, so we + * don't call the callback and free the operation twice. 
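For illustration only (hypothetical resource name and interval, not part of the patch), a caller cancels a recurring action by its key; with this change, cancellation of an in-flight systemd/upstart op is reported immediately rather than after the in-flight instance completes:

static void
example_cancel_monitor(void)
{
    /* Cancel the 10000ms recurring "monitor" registered for "example-rsc".
     * FALSE means no such recurring action exists, or an in-flight child
     * process could not be terminated.
     */
    if (services_action_cancel("example-rsc", "monitor", 10000) == FALSE) {
        crm_info("Recurring monitor for example-rsc was not cancelled");
    }
}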
+ */ + services_action_cleanup(op); } +#endif + + // The rest of this is essentially equivalent to operation_finalize(), + // except without calling handle_blocked_ops() - /* Otherwise, operation is not in-flight, just report as cancelled */ + // Report operation as cancelled op->status = PCMK_LRM_OP_CANCELLED; if (op->opaque->callback) { op->opaque->callback(op); } blocked_ops = g_list_remove(blocked_ops, op); services_action_free(op); cancelled = TRUE; + // @TODO Initiate handle_blocked_ops() asynchronously done: free(id); return cancelled; } gboolean services_action_kick(const char *name, const char *action, guint interval_ms) { svc_action_t * op = NULL; char *id = generate_op_key(name, action, interval_ms); init_recurring_actions(); op = g_hash_table_lookup(recurring_actions, id); free(id); if (op == NULL) { return FALSE; } if (op->pid || inflight_systemd_or_upstart(op)) { return TRUE; } else { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } recurring_action_timer(op); return TRUE; } } /*! * \internal * \brief Add a new recurring operation, checking for duplicates * * \param[in] op Operation to add * * \return TRUE if duplicate found (and reschedule), FALSE otherwise */ static gboolean handle_duplicate_recurring(svc_action_t * op) { svc_action_t * dup = NULL; /* check for duplicates */ dup = g_hash_table_lookup(recurring_actions, op->id); if (dup && (dup != op)) { /* update user data */ if (op->opaque->callback) { dup->opaque->callback = op->opaque->callback; dup->cb_data = op->cb_data; op->cb_data = NULL; } /* immediately execute the next interval */ if (dup->pid != 0) { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } recurring_action_timer(dup); } /* free the duplicate */ services_action_free(op); return TRUE; } return FALSE; } inline static gboolean action_exec_helper(svc_action_t * op) { /* Whether a/synchronous must be decided (op->synchronous) beforehand. */ if (op->standard && (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_UPSTART) == 0)) { #if SUPPORT_UPSTART return upstart_job_exec(op); #endif } else if (op->standard && strcasecmp(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { #if SUPPORT_SYSTEMD return systemd_unit_exec(op); #endif } else { return services_os_action_execute(op); } /* The 'op' has probably been freed if the execution functions return TRUE for the asynchronous 'op'. */ /* Avoid using the 'op' in here. */ return FALSE; } void services_add_inflight_op(svc_action_t * op) { if (op == NULL) { return; } CRM_ASSERT(op->synchronous == FALSE); /* keep track of ops that are in-flight to avoid collisions in the same namespace */ if (op->rsc) { inflight_ops = g_list_append(inflight_ops, op); } } /*! 
* \internal * \brief Stop tracking an operation that completed * * \param[in] op Operation to stop tracking */ void services_untrack_op(svc_action_t *op) { /* Op is no longer in-flight or blocked */ inflight_ops = g_list_remove(inflight_ops, op); blocked_ops = g_list_remove(blocked_ops, op); /* Op is no longer blocking other ops, so check if any need to run */ handle_blocked_ops(); } gboolean services_action_async(svc_action_t * op, void (*action_callback) (svc_action_t *)) { op->synchronous = false; if (action_callback) { op->opaque->callback = action_callback; } if (op->interval_ms > 0) { init_recurring_actions(); if (handle_duplicate_recurring(op) == TRUE) { /* entry rescheduled, dup freed */ /* exit early */ return TRUE; } g_hash_table_replace(recurring_actions, op->id, op); } if (op->rsc && is_op_blocked(op->rsc)) { blocked_ops = g_list_append(blocked_ops, op); return TRUE; } return action_exec_helper(op); } static gboolean processing_blocked_ops = FALSE; gboolean is_op_blocked(const char *rsc) { GList *gIter = NULL; svc_action_t *op = NULL; for (gIter = inflight_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; if (safe_str_eq(op->rsc, rsc)) { return TRUE; } } return FALSE; } static void handle_blocked_ops(void) { GList *executed_ops = NULL; GList *gIter = NULL; svc_action_t *op = NULL; gboolean res = FALSE; if (processing_blocked_ops) { /* avoid nested calling of this function */ return; } processing_blocked_ops = TRUE; /* n^2 operation here, but blocked ops are incredibly rare. this list * will be empty 99% of the time. */ for (gIter = blocked_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; if (is_op_blocked(op->rsc)) { continue; } executed_ops = g_list_append(executed_ops, op); res = action_exec_helper(op); if (res == FALSE) { op->status = PCMK_LRM_OP_ERROR; /* this can cause this function to be called recursively * which is why we have processing_blocked_ops static variable */ operation_finalize(op); } } for (gIter = executed_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; blocked_ops = g_list_remove(blocked_ops, op); } g_list_free(executed_ops); processing_blocked_ops = FALSE; } #if SUPPORT_NAGIOS static int nagios_get_metadata(const char *type, char **output) { int rc = pcmk_ok; FILE *file_strm = NULL; int start = 0, length = 0, read_len = 0; char *metadata_file = crm_strdup_printf("%s/%s.xml", NAGIOS_METADATA_DIR, type); file_strm = fopen(metadata_file, "r"); if (file_strm == NULL) { crm_err("Metadata file %s does not exist", metadata_file); free(metadata_file); return -EIO; } /* see how big the file is */ start = ftell(file_strm); fseek(file_strm, 0L, SEEK_END); length = ftell(file_strm); fseek(file_strm, 0L, start); CRM_ASSERT(length >= 0); CRM_ASSERT(start == ftell(file_strm)); if (length <= 0) { crm_info("%s was not valid", metadata_file); free(*output); *output = NULL; rc = -EIO; } else { crm_trace("Reading %d bytes from file", length); *output = calloc(1, (length + 1)); read_len = fread(*output, 1, length, file_strm); if (read_len != length) { crm_err("Calculated and read bytes differ: %d vs. 
%d", length, read_len); free(*output); *output = NULL; rc = -EIO; } } fclose(file_strm); free(metadata_file); return rc; } #endif static gboolean action_get_metadata(svc_action_t *op) { const char *class = op->standard; if (op->agent == NULL) { crm_err("meta-data requested without specifying agent"); return FALSE; } if (class == NULL) { crm_err("meta-data requested for agent %s without specifying class", op->agent); return FALSE; } if (!strcmp(class, PCMK_RESOURCE_CLASS_SERVICE)) { class = resources_find_service_class(op->agent); } if (class == NULL) { crm_err("meta-data requested for %s, but could not determine class", op->agent); return FALSE; } if (safe_str_eq(class, PCMK_RESOURCE_CLASS_LSB)) { return (services__get_lsb_metadata(op->agent, &op->stdout_data) >= 0); } #if SUPPORT_NAGIOS if (safe_str_eq(class, PCMK_RESOURCE_CLASS_NAGIOS)) { return (nagios_get_metadata(op->agent, &op->stdout_data) >= 0); } #endif return action_exec_helper(op); } gboolean services_action_sync(svc_action_t * op) { gboolean rc = TRUE; if (op == NULL) { crm_trace("No operation to execute"); return FALSE; } op->synchronous = true; if (safe_str_eq(op->action, "meta-data")) { /* Synchronous meta-data operations are handled specially. Since most * resource classes don't provide any meta-data, it has to be * synthesized from available information about the agent. * * services_action_async() doesn't treat meta-data actions specially, so * it will result in an error for classes that don't support the action. */ rc = action_get_metadata(op); } else { rc = action_exec_helper(op); } crm_trace(" > " CRM_OP_FMT ": %s = %d", op->rsc, op->action, op->interval_ms, op->opaque->exec, op->rc); if (op->stdout_data) { crm_trace(" > stdout: %s", op->stdout_data); } if (op->stderr_data) { crm_trace(" > stderr: %s", op->stderr_data); } return rc; } GList * get_directory_list(const char *root, gboolean files, gboolean executable) { return services_os_get_directory_list(root, files, executable); } GList * resources_list_standards(void) { GList *standards = NULL; GList *agents = NULL; standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_OCF)); standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_LSB)); standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SERVICE)); #if SUPPORT_SYSTEMD agents = systemd_unit_listall(); if (agents) { standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SYSTEMD)); g_list_free_full(agents, free); } #endif #if SUPPORT_UPSTART agents = upstart_job_listall(); if (agents) { standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_UPSTART)); g_list_free_full(agents, free); } #endif #if SUPPORT_NAGIOS agents = resources_os_list_nagios_agents(); if (agents) { standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_NAGIOS)); g_list_free_full(agents, free); } #endif return standards; } GList * resources_list_providers(const char *standard) { if (is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_provider)) { return resources_os_list_ocf_providers(); } return NULL; } GList * resources_list_agents(const char *standard, const char *provider) { if ((standard == NULL) || (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0)) { GList *tmp1; GList *tmp2; GList *result = services__list_lsb_agents(); if (standard == NULL) { tmp1 = result; tmp2 = resources_os_list_ocf_agents(NULL); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } #if SUPPORT_SYSTEMD tmp1 = result; tmp2 = systemd_unit_listall(); if (tmp2) { result = g_list_concat(tmp1, tmp2); } #endif #if 
SUPPORT_UPSTART tmp1 = result; tmp2 = upstart_job_listall(); if (tmp2) { result = g_list_concat(tmp1, tmp2); } #endif return result; } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_OCF) == 0) { return resources_os_list_ocf_agents(provider); } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_LSB) == 0) { return services__list_lsb_agents(); #if SUPPORT_SYSTEMD } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { return systemd_unit_listall(); #endif #if SUPPORT_UPSTART } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_UPSTART) == 0) { return upstart_job_listall(); #endif #if SUPPORT_NAGIOS } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_NAGIOS) == 0) { return resources_os_list_nagios_agents(); #endif } return NULL; } diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c index bd22777b92..e2685e9639 100644 --- a/lib/services/services_linux.c +++ b/lib/services/services_linux.c @@ -1,939 +1,939 @@ /* - * Copyright (C) 2010-2016 Andrew Beekhof + * Copyright 2010-2019 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_SIGNALFD_H #include #endif #include "crm/crm.h" #include "crm/common/mainloop.h" #include "crm/services.h" #include "services_private.h" #if SUPPORT_CIBSECRETS # include "crm/common/cib_secrets.h" #endif static gboolean svc_read_output(int fd, svc_action_t * op, bool is_stderr) { char *data = NULL; int rc = 0, len = 0; char buf[500]; static const size_t buf_read_len = sizeof(buf) - 1; if (fd < 0) { crm_trace("No fd for %s", op->id); return FALSE; } if (is_stderr && op->stderr_data) { len = strlen(op->stderr_data); data = op->stderr_data; crm_trace("Reading %s stderr into offset %d", op->id, len); } else if (is_stderr == FALSE && op->stdout_data) { len = strlen(op->stdout_data); data = op->stdout_data; crm_trace("Reading %s stdout into offset %d", op->id, len); } else { crm_trace("Reading %s %s into offset %d", op->id, is_stderr?"stderr":"stdout", len); } do { rc = read(fd, buf, buf_read_len); if (rc > 0) { - crm_trace("Got %d chars: %.80s", rc, buf); buf[rc] = 0; + crm_trace("Got %d chars: %.80s", rc, buf); data = realloc_safe(data, len + rc + 1); len += sprintf(data + len, "%s", buf); } else if (errno != EINTR) { /* error or EOF * Cleanup happens in pipe_done() */ rc = FALSE; break; } } while (rc == buf_read_len || rc < 0); if (is_stderr) { op->stderr_data = data; } else { op->stdout_data = data; } return rc; } static int dispatch_stdout(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return svc_read_output(op->opaque->stdout_fd, op, FALSE); } static int dispatch_stderr(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return svc_read_output(op->opaque->stderr_fd, op, TRUE); } static void pipe_out_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; crm_trace("%p", op); op->opaque->stdout_gsource = NULL; if (op->opaque->stdout_fd > STDOUT_FILENO) { close(op->opaque->stdout_fd); } op->opaque->stdout_fd = -1; } static void pipe_err_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; op->opaque->stderr_gsource = NULL; if (op->opaque->stderr_fd > STDERR_FILENO) { close(op->opaque->stderr_fd); } op->opaque->stderr_fd = -1; } static struct mainloop_fd_callbacks stdout_callbacks = { .dispatch = 
dispatch_stdout, .destroy = pipe_out_done, }; static struct mainloop_fd_callbacks stderr_callbacks = { .dispatch = dispatch_stderr, .destroy = pipe_err_done, }; static void set_ocf_env(const char *key, const char *value, gpointer user_data) { if (setenv(key, value, 1) != 0) { crm_perror(LOG_ERR, "setenv failed for key:%s and value:%s", key, value); } } static void set_ocf_env_with_prefix(gpointer key, gpointer value, gpointer user_data) { char buffer[500]; snprintf(buffer, sizeof(buffer), "OCF_RESKEY_%s", (char *)key); set_ocf_env(buffer, value, user_data); } /*! * \internal * \brief Add environment variables suitable for an action * * \param[in] op Action to use */ static void add_action_env_vars(const svc_action_t *op) { if (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_OCF) == FALSE) { return; } if (op->params) { g_hash_table_foreach(op->params, set_ocf_env_with_prefix, NULL); } set_ocf_env("OCF_RA_VERSION_MAJOR", "1", NULL); set_ocf_env("OCF_RA_VERSION_MINOR", "0", NULL); set_ocf_env("OCF_ROOT", OCF_ROOT_DIR, NULL); set_ocf_env("OCF_EXIT_REASON_PREFIX", PCMK_OCF_REASON_PREFIX, NULL); if (op->rsc) { set_ocf_env("OCF_RESOURCE_INSTANCE", op->rsc, NULL); } if (op->agent != NULL) { set_ocf_env("OCF_RESOURCE_TYPE", op->agent, NULL); } /* Notes: this is not added to specification yet. Sept 10,2004 */ if (op->provider != NULL) { set_ocf_env("OCF_RESOURCE_PROVIDER", op->provider, NULL); } } gboolean recurring_action_timer(gpointer data) { svc_action_t *op = data; crm_debug("Scheduling another invocation of %s", op->id); /* Clean out the old result */ free(op->stdout_data); op->stdout_data = NULL; free(op->stderr_data); op->stderr_data = NULL; op->opaque->repeat_timer = 0; services_action_async(op, NULL); return FALSE; } /* Returns FALSE if 'op' should be free'd by the caller */ gboolean operation_finalize(svc_action_t * op) { int recurring = 0; if (op->interval_ms) { if (op->cancel) { op->status = PCMK_LRM_OP_CANCELLED; cancel_recurring_action(op); } else { recurring = 1; op->opaque->repeat_timer = g_timeout_add(op->interval_ms, recurring_action_timer, (void *)op); } } if (op->opaque->callback) { op->opaque->callback(op); } op->pid = 0; services_untrack_op(op); if (!recurring && op->synchronous == FALSE) { /* * If this is a recurring action, do not free explicitly. * It will get freed whenever the action gets cancelled. */ services_action_free(op); return TRUE; } services_action_cleanup(op); return FALSE; } static void operation_finished(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { svc_action_t *op = mainloop_child_userdata(p); char *prefix = crm_strdup_printf("%s:%d", op->id, op->pid); mainloop_clear_child_userdata(p); op->status = PCMK_LRM_OP_DONE; CRM_ASSERT(op->pid == pid); crm_trace("%s %p %p", prefix, op->opaque->stderr_gsource, op->opaque->stdout_gsource); if (op->opaque->stderr_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ crm_trace("%s dispatching stderr", prefix); dispatch_stderr(op); crm_trace("%s: %p", op->id, op->stderr_data); mainloop_del_fd(op->opaque->stderr_gsource); op->opaque->stderr_gsource = NULL; } if (op->opaque->stdout_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. 
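A hedged sketch of the recurring-action lifecycle that operation_finalize() above implements (resource, agent, callback and intervals are hypothetical): while interval_ms is non-zero and the action has not been cancelled, each completion re-arms it via recurring_action_timer(), and the action is freed only once services_action_cancel() is called for it:

static void
example_monitor_done(svc_action_t *op)
{
    crm_debug("Recurring check %s finished with rc=%d", op->id, op->rc);
}

static void
example_schedule_monitor(void)
{
    /* 10000ms interval, 20000ms timeout, no parameters, default flags */
    svc_action_t *op = resources_action_create("example-rsc",
                                               PCMK_RESOURCE_CLASS_LSB, NULL,
                                               "example-agent", "monitor",
                                               10000, 20000, NULL, 0);

    if ((op == NULL)
        || (services_action_async(op, example_monitor_done) == FALSE)) {
        crm_err("Could not schedule recurring check for example-rsc");
    }
}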
Force the read now.*/ crm_trace("%s dispatching stdout", prefix); dispatch_stdout(op); crm_trace("%s: %p", op->id, op->stdout_data); mainloop_del_fd(op->opaque->stdout_gsource); op->opaque->stdout_gsource = NULL; } if (signo) { if (mainloop_child_timeout(p)) { crm_warn("%s - timed out after %dms", prefix, op->timeout); op->status = PCMK_LRM_OP_TIMEOUT; op->rc = PCMK_OCF_TIMEOUT; } else if (op->cancel) { /* If an in-flight recurring operation was killed because it was * cancelled, don't treat that as a failure. */ crm_info("%s - terminated with signal %d", prefix, signo); op->status = PCMK_LRM_OP_CANCELLED; op->rc = PCMK_OCF_OK; } else { crm_warn("%s - terminated with signal %d", prefix, signo); op->status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_SIGNAL; } } else { op->rc = exitcode; crm_debug("%s - exited with rc=%d", prefix, exitcode); } free(prefix); prefix = crm_strdup_printf("%s:%d:stderr", op->id, op->pid); crm_log_output(LOG_NOTICE, prefix, op->stderr_data); free(prefix); prefix = crm_strdup_printf("%s:%d:stdout", op->id, op->pid); crm_log_output(LOG_DEBUG, prefix, op->stdout_data); free(prefix); operation_finalize(op); } /*! * \internal * \brief Set operation rc and status per errno from stat(), fork() or execvp() * * \param[in,out] op Operation to set rc and status for * \param[in] error Value of errno after system call * * \return void */ static void services_handle_exec_error(svc_action_t * op, int error) { int rc_not_installed, rc_insufficient_priv, rc_exec_error; /* Mimic the return codes for each standard as that's what we'll convert back from in get_uniform_rc() */ if (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB) && safe_str_eq(op->action, "status")) { rc_not_installed = PCMK_LSB_STATUS_NOT_INSTALLED; rc_insufficient_priv = PCMK_LSB_STATUS_INSUFFICIENT_PRIV; rc_exec_error = PCMK_LSB_STATUS_UNKNOWN; #if SUPPORT_NAGIOS } else if (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_NAGIOS)) { rc_not_installed = NAGIOS_NOT_INSTALLED; rc_insufficient_priv = NAGIOS_INSUFFICIENT_PRIV; rc_exec_error = PCMK_OCF_EXEC_ERROR; #endif } else { rc_not_installed = PCMK_OCF_NOT_INSTALLED; rc_insufficient_priv = PCMK_OCF_INSUFFICIENT_PRIV; rc_exec_error = PCMK_OCF_EXEC_ERROR; } switch (error) { /* see execve(2), stat(2) and fork(2) */ case ENOENT: /* No such file or directory */ case EISDIR: /* Is a directory */ case ENOTDIR: /* Path component is not a directory */ case EINVAL: /* Invalid executable format */ case ENOEXEC: /* Invalid executable format */ op->rc = rc_not_installed; op->status = PCMK_LRM_OP_NOT_INSTALLED; break; case EACCES: /* permission denied (various errors) */ case EPERM: /* permission denied (various errors) */ op->rc = rc_insufficient_priv; op->status = PCMK_LRM_OP_ERROR; break; default: op->rc = rc_exec_error; op->status = PCMK_LRM_OP_ERROR; } } static void action_launch_child(svc_action_t *op) { int lpc; /* SIGPIPE is ignored (which is different from signal blocking) by the gnutls library. * Depending on the libqb version in use, libqb may set SIGPIPE to be ignored as well. * We do not want this to be inherited by the child process. By resetting this the signal * to the default behavior, we avoid some potential odd problems that occur during OCF * scripts when SIGPIPE is ignored by the environment. 
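The status and rc values assigned in operation_finished() above are what a completion callback ultimately sees; a hedged sketch (the callback name is hypothetical, the constants are the ones used in this file):

static void
example_action_done(svc_action_t *op)
{
    switch (op->status) {
        case PCMK_LRM_OP_DONE:
            crm_info("%s exited with rc=%d", op->id, op->rc);
            break;
        case PCMK_LRM_OP_TIMEOUT:
            crm_warn("%s timed out after %dms", op->id, op->timeout);
            break;
        case PCMK_LRM_OP_CANCELLED:
            crm_info("%s was cancelled before it completed", op->id);
            break;
        default:
            crm_err("%s failed (status %d, rc %d)",
                    op->id, op->status, op->rc);
            break;
    }
}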
*/ signal(SIGPIPE, SIG_DFL); #if defined(HAVE_SCHED_SETSCHEDULER) if (sched_getscheduler(0) != SCHED_OTHER) { struct sched_param sp; memset(&sp, 0, sizeof(sp)); sp.sched_priority = 0; if (sched_setscheduler(0, SCHED_OTHER, &sp) == -1) { crm_perror(LOG_ERR, "Could not reset scheduling policy to SCHED_OTHER for %s", op->id); } } #endif if (setpriority(PRIO_PROCESS, 0, 0) == -1) { crm_perror(LOG_ERR, "Could not reset process priority to 0 for %s", op->id); } /* Man: The call setpgrp() is equivalent to setpgid(0,0) * _and_ compiles on BSD variants too * need to investigate if it works the same too. */ setpgid(0, 0); // Close all file descriptors except stdin/stdout/stderr for (lpc = getdtablesize() - 1; lpc > STDERR_FILENO; lpc--) { close(lpc); } #if SUPPORT_CIBSECRETS if (replace_secret_params(op->rsc, op->params) < 0) { /* replacing secrets failed! */ if (safe_str_eq(op->action,"stop")) { /* don't fail on stop! */ crm_info("proceeding with the stop operation for %s", op->rsc); } else { crm_err("failed to get secrets for %s, " "considering resource not configured", op->rsc); _exit(PCMK_OCF_NOT_CONFIGURED); } } #endif add_action_env_vars(op); /* Become the desired user */ if (op->opaque->uid && (geteuid() == 0)) { // If requested, set effective group if (op->opaque->gid && (setgid(op->opaque->gid) < 0)) { crm_perror(LOG_ERR, "Could not set child group to %d", op->opaque->gid); _exit(PCMK_OCF_NOT_CONFIGURED); } // Erase supplementary group list // (We could do initgroups() if we kept a copy of the username) if (setgroups(0, NULL) < 0) { crm_perror(LOG_ERR, "Could not set child groups"); _exit(PCMK_OCF_NOT_CONFIGURED); } // Set effective user if (setuid(op->opaque->uid) < 0) { crm_perror(LOG_ERR, "setting user to %d", op->opaque->uid); _exit(PCMK_OCF_NOT_CONFIGURED); } } /* execute the RA */ execvp(op->opaque->exec, op->opaque->args); /* Most cases should have been already handled by stat() */ services_handle_exec_error(op, errno); _exit(op->rc); } #ifndef HAVE_SYS_SIGNALFD_H static int sigchld_pipe[2] = { -1, -1 }; static void sigchld_handler() { if ((sigchld_pipe[1] >= 0) && (write(sigchld_pipe[1], "", 1) == -1)) { crm_perror(LOG_TRACE, "Could not poke SIGCHLD self-pipe"); } } #endif static void action_synced_wait(svc_action_t * op, sigset_t *mask) { int status = 0; int timeout = op->timeout; int sfd = -1; time_t start = -1; struct pollfd fds[3]; int wait_rc = 0; #ifdef HAVE_SYS_SIGNALFD_H sfd = signalfd(-1, mask, SFD_NONBLOCK); if (sfd < 0) { crm_perror(LOG_ERR, "signalfd() failed"); } #else sfd = sigchld_pipe[0]; #endif fds[0].fd = op->opaque->stdout_fd; fds[0].events = POLLIN; fds[0].revents = 0; fds[1].fd = op->opaque->stderr_fd; fds[1].events = POLLIN; fds[1].revents = 0; fds[2].fd = sfd; fds[2].events = POLLIN; fds[2].revents = 0; crm_trace("Waiting for %d", op->pid); start = time(NULL); do { int poll_rc = poll(fds, 3, timeout); if (poll_rc > 0) { if (fds[0].revents & POLLIN) { svc_read_output(op->opaque->stdout_fd, op, FALSE); } if (fds[1].revents & POLLIN) { svc_read_output(op->opaque->stderr_fd, op, TRUE); } if (fds[2].revents & POLLIN) { #ifdef HAVE_SYS_SIGNALFD_H struct signalfd_siginfo fdsi; ssize_t s; s = read(sfd, &fdsi, sizeof(struct signalfd_siginfo)); if (s != sizeof(struct signalfd_siginfo)) { crm_perror(LOG_ERR, "Read from signal fd %d failed", sfd); } else if (fdsi.ssi_signo == SIGCHLD) { #else if (1) { /* Clear out the sigchld pipe. 
*/ char ch; while (read(sfd, &ch, 1) == 1) /*omit*/; #endif wait_rc = waitpid(op->pid, &status, WNOHANG); if (wait_rc > 0) { break; } else if (wait_rc < 0){ if (errno == ECHILD) { /* Here, don't dare to kill and bail out... */ break; } else { /* ...otherwise pretend process still runs. */ wait_rc = 0; } crm_perror(LOG_ERR, "waitpid() for %d failed", op->pid); } } } } else if (poll_rc == 0) { timeout = 0; break; } else if (poll_rc < 0) { if (errno != EINTR) { crm_perror(LOG_ERR, "poll() failed"); break; } } timeout = op->timeout - (time(NULL) - start) * 1000; } while ((op->timeout < 0 || timeout > 0)); crm_trace("Child done: %d", op->pid); if (wait_rc <= 0) { op->rc = PCMK_OCF_UNKNOWN_ERROR; if (op->timeout > 0 && timeout <= 0) { op->status = PCMK_LRM_OP_TIMEOUT; crm_warn("%s:%d - timed out after %dms", op->id, op->pid, op->timeout); } else { op->status = PCMK_LRM_OP_ERROR; } /* If only child hasn't been successfully waited for, yet. This is to limit killing wrong target a bit more. */ if (wait_rc == 0 && waitpid(op->pid, &status, WNOHANG) == 0) { if (kill(op->pid, SIGKILL)) { crm_err("kill(%d, KILL) failed: %d", op->pid, errno); } /* Safe to skip WNOHANG here as we sent non-ignorable signal. */ while (waitpid(op->pid, &status, 0) == (pid_t) -1 && errno == EINTR) /*omit*/; } } else if (WIFEXITED(status)) { op->status = PCMK_LRM_OP_DONE; op->rc = WEXITSTATUS(status); crm_info("Managed %s process %d exited with rc=%d", op->id, op->pid, op->rc); } else if (WIFSIGNALED(status)) { int signo = WTERMSIG(status); op->status = PCMK_LRM_OP_ERROR; crm_err("Managed %s process %d exited with signal=%d", op->id, op->pid, signo); } #ifdef WCOREDUMP if (WCOREDUMP(status)) { crm_err("Managed %s process %d dumped core", op->id, op->pid); } #endif svc_read_output(op->opaque->stdout_fd, op, FALSE); svc_read_output(op->opaque->stderr_fd, op, TRUE); close(op->opaque->stdout_fd); close(op->opaque->stderr_fd); #ifdef HAVE_SYS_SIGNALFD_H close(sfd); #endif } /* For an asynchronous 'op', returns FALSE if 'op' should be free'd by the caller */ /* For a synchronous 'op', returns FALSE if 'op' fails */ gboolean services_os_action_execute(svc_action_t * op) { int stdout_fd[2]; int stderr_fd[2]; int rc; struct stat st; sigset_t *pmask; #ifdef HAVE_SYS_SIGNALFD_H sigset_t mask; sigset_t old_mask; #define sigchld_cleanup() do { \ if (sigismember(&old_mask, SIGCHLD) == 0) { \ if (sigprocmask(SIG_UNBLOCK, &mask, NULL) < 0) { \ crm_perror(LOG_ERR, "sigprocmask() failed to unblock sigchld"); \ } \ } \ } while (0) #else struct sigaction sa; struct sigaction old_sa; #define sigchld_cleanup() do { \ if (sigaction(SIGCHLD, &old_sa, NULL) < 0) { \ crm_perror(LOG_ERR, "sigaction() failed to remove sigchld handler"); \ } \ close(sigchld_pipe[0]); \ close(sigchld_pipe[1]); \ sigchld_pipe[0] = sigchld_pipe[1] = -1; \ } while(0) #endif /* Fail fast */ if(stat(op->opaque->exec, &st) != 0) { rc = errno; crm_warn("Cannot execute '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } return FALSE; } if (pipe(stdout_fd) < 0) { rc = errno; crm_err("pipe(stdout_fd) failed. '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } return FALSE; } if (pipe(stderr_fd) < 0) { rc = errno; close(stdout_fd[0]); close(stdout_fd[1]); crm_err("pipe(stderr_fd) failed. 
'%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } return FALSE; } if (op->synchronous) { #ifdef HAVE_SYS_SIGNALFD_H sigemptyset(&mask); sigaddset(&mask, SIGCHLD); sigemptyset(&old_mask); if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { crm_perror(LOG_ERR, "sigprocmask() failed to block sigchld"); } pmask = &mask; #else if(pipe(sigchld_pipe) == -1) { crm_perror(LOG_ERR, "pipe() failed"); } rc = crm_set_nonblocking(sigchld_pipe[0]); if (rc < 0) { crm_warn("Could not set pipe input non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } rc = crm_set_nonblocking(sigchld_pipe[1]); if (rc < 0) { crm_warn("Could not set pipe output non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } sa.sa_handler = sigchld_handler; sa.sa_flags = 0; sigemptyset(&sa.sa_mask); if (sigaction(SIGCHLD, &sa, &old_sa) < 0) { crm_perror(LOG_ERR, "sigaction() failed to set sigchld handler"); } pmask = NULL; #endif } op->pid = fork(); switch (op->pid) { case -1: rc = errno; close(stdout_fd[0]); close(stdout_fd[1]); close(stderr_fd[0]); close(stderr_fd[1]); crm_err("Could not execute '%s': %s (%d)", op->opaque->exec, pcmk_strerror(rc), rc); services_handle_exec_error(op, rc); if (!op->synchronous) { return operation_finalize(op); } sigchld_cleanup(); return FALSE; case 0: /* Child */ close(stdout_fd[0]); close(stderr_fd[0]); if (STDOUT_FILENO != stdout_fd[1]) { if (dup2(stdout_fd[1], STDOUT_FILENO) != STDOUT_FILENO) { crm_err("dup2() failed (stdout)"); } close(stdout_fd[1]); } if (STDERR_FILENO != stderr_fd[1]) { if (dup2(stderr_fd[1], STDERR_FILENO) != STDERR_FILENO) { crm_err("dup2() failed (stderr)"); } close(stderr_fd[1]); } if (op->synchronous) { sigchld_cleanup(); } action_launch_child(op); CRM_ASSERT(0); /* action_launch_child is effectively noreturn */ } /* Only the parent reaches here */ close(stdout_fd[1]); close(stderr_fd[1]); op->opaque->stdout_fd = stdout_fd[0]; rc = crm_set_nonblocking(op->opaque->stdout_fd); if (rc < 0) { crm_warn("Could not set child output non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } op->opaque->stderr_fd = stderr_fd[0]; rc = crm_set_nonblocking(op->opaque->stderr_fd); if (rc < 0) { crm_warn("Could not set child error output non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } if (op->synchronous) { action_synced_wait(op, pmask); sigchld_cleanup(); } else { crm_trace("Async waiting for %d - %s", op->pid, op->opaque->exec); mainloop_child_add_with_flags(op->pid, op->timeout, op->id, op, (op->flags & SVC_ACTION_LEAVE_GROUP) ? mainloop_leave_pid_group : 0, operation_finished); op->opaque->stdout_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stdout_fd, op, &stdout_callbacks); op->opaque->stderr_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stderr_fd, op, &stderr_callbacks); services_add_inflight_op(op); } return TRUE; } GList * services_os_get_directory_list(const char *root, gboolean files, gboolean executable) { GList *list = NULL; struct dirent **namelist; int entries = 0, lpc = 0; char buffer[PATH_MAX]; entries = scandir(root, &namelist, NULL, alphasort); if (entries <= 0) { return list; } for (lpc = 0; lpc < entries; lpc++) { struct stat sb; if ('.' 
== namelist[lpc]->d_name[0]) { free(namelist[lpc]); continue; } snprintf(buffer, sizeof(buffer), "%s/%s", root, namelist[lpc]->d_name); if (stat(buffer, &sb)) { continue; } if (S_ISDIR(sb.st_mode)) { if (files) { free(namelist[lpc]); continue; } } else if (S_ISREG(sb.st_mode)) { if (files == FALSE) { free(namelist[lpc]); continue; } else if (executable && (sb.st_mode & S_IXUSR) == 0 && (sb.st_mode & S_IXGRP) == 0 && (sb.st_mode & S_IXOTH) == 0) { free(namelist[lpc]); continue; } } list = g_list_append(list, strdup(namelist[lpc]->d_name)); free(namelist[lpc]); } free(namelist); return list; } GList * resources_os_list_ocf_providers(void) { return get_directory_list(OCF_ROOT_DIR "/resource.d", FALSE, TRUE); } GList * resources_os_list_ocf_agents(const char *provider) { GList *gIter = NULL; GList *result = NULL; GList *providers = NULL; if (provider) { char buffer[500]; snprintf(buffer, sizeof(buffer), "%s/resource.d/%s", OCF_ROOT_DIR, provider); return get_directory_list(buffer, TRUE, TRUE); } providers = resources_os_list_ocf_providers(); for (gIter = providers; gIter != NULL; gIter = gIter->next) { GList *tmp1 = result; GList *tmp2 = resources_os_list_ocf_agents(gIter->data); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } g_list_free_full(providers, free); return result; } #if SUPPORT_NAGIOS GList * resources_os_list_nagios_agents(void) { GList *plugin_list = NULL; GList *result = NULL; GList *gIter = NULL; plugin_list = get_directory_list(NAGIOS_PLUGIN_DIR, TRUE, TRUE); /* Make sure both the plugin and its metadata exist */ for (gIter = plugin_list; gIter != NULL; gIter = gIter->next) { const char *plugin = gIter->data; char *metadata = crm_strdup_printf(NAGIOS_METADATA_DIR "/%s.xml", plugin); struct stat st; if (stat(metadata, &st) == 0) { result = g_list_append(result, strdup(plugin)); } free(metadata); } g_list_free_full(plugin_list, free); return result; } #endif diff --git a/pacemaker.spec.in b/pacemaker.spec.in index 6b2b268ba7..d11ba1e70a 100644 --- a/pacemaker.spec.in +++ b/pacemaker.spec.in @@ -1,835 +1,837 @@ # Globals and defines to control package behavior (configure these as desired) ## User and group to use for nonprivileged services %global uname hacluster %global gname haclient ## Where to install Pacemaker documentation %global pcmk_docdir %{_docdir}/%{name} ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) %global pcmkversion 2.0.1 %global specversion 1 ## Upstream commit (or git tag, such as "Pacemaker-" plus the ## {pcmkversion} macro for an official release) to use for this package %global commit HEAD ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 ## Python major version to use (2, 3, or 0 for auto-detect) %global python_major 0 # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Short version of git commit %define shortcommit %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo ${c:10};; *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) ## Whether this is a tagged release %define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) 
## Whether this is a release candidate (in case of a tagged release) %define pre_release %([ "%{tag_release}" -eq 0 ] || { case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; esac; }; echo $?) ## Heuristic used to infer bleeding-edge deployments that are ## less likely to have working versions of the documentation tools %define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and ## for that there are only few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## - systemd-aware rpm as manifested with the presence of particular ## macro (rpm itself will trivially always be present when building) ## - existence of /usr/lib/os-release file, which is something heavily ## propagated by systemd project ## - when not good enough, there's always a possibility to check ## particular distro-specific macros (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) %if 0%{?fedora} > 20 || 0%{?rhel} > 7 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif # Python-related definitions ## Use Python 3 on certain platforms if major version not specified %if %{?python_major} == 0 %if 0%{?fedora} > 26 || 0%{?rhel} > 7 %global python_major 3 %endif %endif ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) ## -- distro-dependent tricks or automake's fallback to be applied there %if %{defined _python_bytecompile_extra} %global _python_bytecompile_extra 0 %else ### the statement effectively means no RPM-native byte-compiling will occur at ### all, so distro-dependent tricks for Python-specific packages to be applied %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif ## Values that differ by Python major version %if 0%{?python_major} > 2 %global python_name python3 %global python_path %{?__python3}%{!?__python3:/usr/bin/python%{?python3_pkgversion}%{!?python3_pkgversion:3}} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %if 0%{?python_major} > 1 %global python_name python2 %global python_path %{?__python2}%{!?__python2:/usr/bin/python%{?python2_pkgversion}%{!?python2_pkgversion:2}} %define python_site %{?python2_sitelib}%{!?python2_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} %define python_site %{?python_sitelib}%{!?python_sitelib:%( python -c 'from distutils.sysconfig import get_python_lib as gpl; 
print(gpl(1))' 2>/dev/null)} %endif %endif # Definitions for backward compatibility with older RPM versions ## Ensure the license macro behaves consistently (older RPM will otherwise ## overwrite it once it encounters "License:"). Courtesy Jason Tibbitts: ## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 %if !%{defined _licensedir} %define description %{lua: rpm.define("license %doc") print("%description") } %endif # Define conditionals so that "rpmbuild --with " and # "rpmbuild --without " can enable and disable specific features ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Add option to create binaries with coverage analysis %bcond_with coverage ## Add option to skip generating documentation ## (the build tools aren't available everywhere) %bcond_without doc ## Add option to prefix package version with "0." ## (so later "official" packages will be considered updates) %bcond_with pre_release ## Add option to ship Upstart job files %bcond_with upstart_job ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening ## Add option to disable links for legacy daemon names %bcond_without legacy_links # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif # Define the release version # (do not look at externally enforced pre-release flag for tagged releases # as only -rc tags, captured with the second condition, implies that then) %if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} %if 0%{pre_release} %define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) %else %define pcmk_release 0.%{specversion}.%{shortcommit}.git %endif %else %if 0%{tag_release} %define pcmk_release %{specversion} %else %define pcmk_release %{specversion}.%{shortcommit}.git %endif %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: http://www.clusterlabs.org Group: System Environment/Daemons # Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: # https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz Requires: resource-agents Requires: %{name}-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if !%{defined _unitdir} Requires: procps-ng Requires: psmisc %endif %{?systemd_requires} Requires: %{python_path} BuildRequires: %{python_name}-devel # Pacemaker requires a minimum libqb functionality Requires: libqb >= 0.13.0 BuildRequires: libqb-devel >= 0.13.0 # Basics required for the build (even if usually satisfied through other BRs) BuildRequires: coreutils findutils grep sed # Required for core functionality BuildRequires: automake autoconf gcc libtool pkgconfig libtool-ltdl-devel BuildRequires: pkgconfig(glib-2.0) >= 2.16 BuildRequires: libxml2-devel libxslt-devel libuuid-devel BuildRequires: bzip2-devel # Enables optional functionality BuildRequires: ncurses-devel 
docbook-style-xsl BuildRequires: help2man gnutls-devel pam-devel pkgconfig(dbus-1) %if %{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync >= 2.0.0 BuildRequires: corosynclib-devel >= 2.0.0 %if %{with stonithd} BuildRequires: cluster-glue-libs-devel %endif ## (note no avoiding effect when building through non-customized mock) %if !%{bleeding} %if %{with doc} BuildRequires: inkscape asciidoc publican %endif %endif Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} # Pacemaker uses the crypto/md5 module from gnulib Provides: bundled(gnulib) %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --with(out) : coverage doc stonithd hardening pre_release profiling upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{name}-libs%{?_isa} = %{version}-%{release} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Recommends: pcmk-cluster-manager = %{version}-%{release} %endif Requires: perl-TimeDate Requires: procps-ng Requires: psmisc Requires(post):coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package libs License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons Requires(pre): shadow-utils Requires: %{name}-schemas = %{version}-%{release} +# sbd 1.4.0+ supports the libpe_status API for pe_working_set_t +Conflicts: sbd < 1.4.0 %description libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-libs package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{name}-libs%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{name}-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if !%{defined _unitdir} Requires: procps-ng %endif # -remote can be fully independent of systemd %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package libs-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{name}-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: libuuid-devel%{?_isa} libtool-ltdl-devel%{?_isa} Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} Requires: bzip2-devel%{?_isa} glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} Requires: corosynclib-devel%{?_isa} >= 2.0.0 %description libs-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-libs-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: %{python_path} Requires: %{name}-libs = %{version}-%{release} Requires: procps-ng Requires: psmisc BuildArch: noarch # systemd python bindings are separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd %else %if 0%{?fedora} > 20 || 0%{?rhel} > 6 Requires: systemd-python %endif %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker Group: Documentation BuildArch: noarch %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas License: GPLv2+ Summary: Schemas and upgrade stylesheets for Pacemaker BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker Pacemaker is an advanced, scalable High-Availability cluster resource manager. %prep %setup -q -n %{name}-%{commit} %build # Early versions of autotools (e.g. 
%build

# Early versions of autotools (e.g. RHEL <= 5) do not support --docdir
export docdir=%{pcmk_docdir}

export systemdunitdir=%{?_unitdir}%{!?_unitdir:no}

%if %{with hardening}
# prefer distro-provided hardening flags in case they are defined
# through _hardening_{c,ld}flags macros, configure script will
# use its own defaults otherwise; if such hardenings are completely
# undesired, rpmbuild using "--without hardening"
# (or "--define '_without_hardening 1'")
export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}"
export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}"
export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}"
export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}"
%endif

./autogen.sh

%{configure} \
    PYTHON=%{python_path} \
    %{!?with_hardening: --disable-hardening} \
    %{!?with_legacy_links: --disable-legacy-links} \
    %{?with_profiling: --with-profiling} \
    %{?with_coverage: --with-coverage} \
    %{!?with_doc: --with-brand=} \
    %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \
    --with-initdir=%{_initrddir} \
    --localstatedir=%{_var} \
    --with-version=%{version}-%{release}

%if 0%{?suse_version} >= 1200
# Fedora handles rpath removal automagically
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
%endif

make %{_smp_mflags} V=1 all
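# In the check below, the sed filter rewrites the word "fail" in the test
# output, and the .CHECKED sentinel carries the suites' actual result, since
# the pipeline's exit status is sed's rather than the tests' own.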
%check
{ cts/cts-scheduler --run one-or-more-unrunnable-instances \
    && cts/cts-cli \
    && touch .CHECKED
} 2>&1 | sed 's/[fF]ail/faiil/g'  # prevent false positives in rpmlint
[ -f .CHECKED ] && rm -f -- .CHECKED
exit $?  # TODO remove when rpm<4.14 compatibility irrelevant

%install
# skip automake-native Python byte-compilation, since RPM-native one (possibly
# distro-confined to Python-specific directories, which is currently the only
# relevant place, anyway) assures proper intrinsic alignment with wider system
# (such as with py_byte_compile macro, which is concurrent Fedora/EL specific)
make install \
    DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \
    %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}}

mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig
install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker
install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon

%if %{with upstart_job}
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init
install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf
install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf
install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf
%endif

%if %{defined _unitdir}
mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name}
%endif

# Don't package static libs
find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f

# For now, don't package the servicelog-related binaries built only for
# ppc64le when certain dependencies are installed. If they get more exercise
# by advanced users, we can reconsider.
rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent
rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd

# Don't ship init scripts for systemd based platforms
%if %{defined _unitdir}
rm -f %{buildroot}/%{_initrddir}/pacemaker
rm -f %{buildroot}/%{_initrddir}/pacemaker_remote
%endif

# Byte-compile Python sources where suitable and the distro procedures known
%if %{defined py_byte_compile}
%{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests}
%if !%{defined _python_bytecompile_extra}
%{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts}
%endif
%endif

%if %{with coverage}
GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov
mkdir -p $GCOV_BASE
find . -name '*.gcno' -type f | while read F ; do
    D=`dirname $F`
    mkdir -p ${GCOV_BASE}/$D
    cp $F ${GCOV_BASE}/$D
done
%endif

%post
%if %{defined _unitdir}
%systemd_post pacemaker.service
%else
/sbin/chkconfig --add pacemaker || :
%endif

%preun
%if %{defined _unitdir}
%systemd_preun pacemaker.service
%else
/sbin/service pacemaker stop >/dev/null 2>&1 || :
if [ "$1" -eq 0 ]; then
    # Package removal, not upgrade
    /sbin/chkconfig --del pacemaker || :
fi
%endif

%postun
%if %{defined _unitdir}
%systemd_postun_with_restart pacemaker.service
%endif

%pre remote
%if %{defined _unitdir}
# Stop the service before anything is touched, and remember to restart
# it as one of the last actions (compared to using systemd_postun_with_restart,
# this avoids suicide when sbd is in use)
systemctl --quiet is-active pacemaker_remote
if [ $? -eq 0 ] ; then
    mkdir -p %{_localstatedir}/lib/rpm-state/%{name}
    touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
    systemctl stop pacemaker_remote >/dev/null 2>&1
else
    rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
fi
%endif

%post remote
%if %{defined _unitdir}
%systemd_post pacemaker_remote.service
%else
/sbin/chkconfig --add pacemaker_remote || :
%endif

%preun remote
%if %{defined _unitdir}
%systemd_preun pacemaker_remote.service
%else
/sbin/service pacemaker_remote stop >/dev/null 2>&1 || :
if [ "$1" -eq 0 ]; then
    # Package removal, not upgrade
    /sbin/chkconfig --del pacemaker_remote || :
fi
%endif

%postun remote
%if %{defined _unitdir}
# This next line is a no-op, because we stopped the service earlier, but
# we leave it here because it allows us to revert to the standard behavior
# in the future if desired
%systemd_postun_with_restart pacemaker_remote.service
# Explicitly take care of removing the flag-file(s) upon final removal
if [ "$1" -eq 0 ] ; then
    rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
fi
%endif

%posttrans remote
%if %{defined _unitdir}
if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then
    systemctl start pacemaker_remote >/dev/null 2>&1
    rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
fi
%endif

%post cli
%if %{defined _unitdir}
%systemd_post crm_mon.service
%endif
if [ "$1" -eq 2 ]; then
    # Package upgrade, not initial install:
    # Move any pre-2.0 logs to new location to ensure they get rotated
    { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \
      || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker
    } >/dev/null 2>/dev/null || :
fi

%preun cli
%if %{defined _unitdir}
%systemd_preun crm_mon.service
%endif

%postun cli
%if %{defined _unitdir}
%systemd_postun_with_restart crm_mon.service
%endif

%pre libs
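# 189 is the uid/gid conventionally reserved for the cluster user on
# Fedora-based distributions; -r plus /sbin/nologin keeps it a non-login
# system account.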
getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189
getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname}
exit 0

%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets libs
%ldconfig_scriptlets cluster-libs
%else
%post libs -p /sbin/ldconfig
%postun libs -p /sbin/ldconfig

%post cluster-libs -p /sbin/ldconfig
%postun cluster-libs -p /sbin/ldconfig
%endif

%files
###########################################################
%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%{_sbindir}/pacemakerd

%if %{defined _unitdir}
%{_unitdir}/pacemaker.service
%else
%{_initrddir}/pacemaker
%endif

%exclude %{_libexecdir}/pacemaker/cts-log-watcher
%exclude %{_libexecdir}/pacemaker/cts-support
%exclude %{_sbindir}/pacemaker-remoted
%if %{with legacy_links}
%exclude %{_sbindir}/pacemaker_remoted
%endif
%{_libexecdir}/pacemaker/*

%{_sbindir}/crm_attribute
%{_sbindir}/crm_master
%{_sbindir}/fence_legacy
%{_sbindir}/stonith_admin

%doc %{_mandir}/man7/pacemaker-controld.*
%doc %{_mandir}/man7/pacemaker-schedulerd.*
%doc %{_mandir}/man7/pacemaker-fenced.*
%doc %{_mandir}/man7/ocf_pacemaker_controld.*
%doc %{_mandir}/man7/ocf_pacemaker_o2cb.*
%doc %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/crm_attribute.*
%doc %{_mandir}/man8/crm_master.*
%doc %{_mandir}/man8/fence_legacy.*
%doc %{_mandir}/man8/pacemakerd.*
%doc %{_mandir}/man8/stonith_admin.*

%doc %{_datadir}/pacemaker/alerts

%license licenses/GPLv2
%doc COPYING
%doc ChangeLog

%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
/usr/lib/ocf/resource.d/pacemaker/controld
/usr/lib/ocf/resource.d/pacemaker/o2cb
/usr/lib/ocf/resource.d/pacemaker/remote

%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/pacemaker.conf
%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf
%endif

%files cli
%dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker
%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker
%config(noreplace) %{_sysconfdir}/sysconfig/crm_mon

%if %{defined _unitdir}
%{_unitdir}/crm_mon.service
%endif

%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/crm_mon.conf
%endif

%{_sbindir}/attrd_updater
%{_sbindir}/cibadmin
%{_sbindir}/crm_diff
%{_sbindir}/crm_error
%{_sbindir}/crm_failcount
%{_sbindir}/crm_mon
%{_sbindir}/crm_node
%{_sbindir}/crm_resource
%{_sbindir}/crm_standby
%{_sbindir}/crm_verify
%{_sbindir}/crmadmin
%{_sbindir}/iso8601
%{_sbindir}/crm_shadow
%{_sbindir}/crm_simulate
%{_sbindir}/crm_report
%{_sbindir}/crm_ticket

%exclude %{_datadir}/pacemaker/alerts
%exclude %{_datadir}/pacemaker/tests
%{_datadir}/pacemaker
%exclude %{_datadir}/pacemaker/*.rng
%exclude %{_datadir}/pacemaker/*.xsl
%{_datadir}/snmp/mibs/PCMK-MIB.txt

%exclude /usr/lib/ocf/resource.d/pacemaker/controld
%exclude /usr/lib/ocf/resource.d/pacemaker/o2cb
%exclude /usr/lib/ocf/resource.d/pacemaker/remote
%dir /usr/lib/ocf
%dir /usr/lib/ocf/resource.d
/usr/lib/ocf/resource.d/pacemaker

%doc %{_mandir}/man7/*
%exclude %{_mandir}/man7/pacemaker-controld.*
%exclude %{_mandir}/man7/pacemaker-schedulerd.*
%exclude %{_mandir}/man7/pacemaker-fenced.*
%exclude %{_mandir}/man7/ocf_pacemaker_controld.*
%exclude %{_mandir}/man7/ocf_pacemaker_o2cb.*
%exclude %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/*
%exclude %{_mandir}/man8/crm_attribute.*
%exclude %{_mandir}/man8/crm_master.*
%exclude %{_mandir}/man8/fence_legacy.*
%exclude %{_mandir}/man8/pacemakerd.*
%exclude %{_mandir}/man8/pacemaker-remoted.*
%exclude %{_mandir}/man8/stonith_admin.*

%license licenses/GPLv2
%doc COPYING
%doc ChangeLog

%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores
%dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker
%dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles
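# Only the versioned runtime libraries (*.so.<N>) are packaged in -libs below;
# the unversioned *.so development symlinks are listed under -libs-devel.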
%files libs
%{_libdir}/libcib.so.*
%{_libdir}/liblrmd.so.*
%{_libdir}/libcrmservice.so.*
%{_libdir}/libcrmcommon.so.*
%{_libdir}/libpe_status.so.*
%{_libdir}/libpe_rules.so.*
%{_libdir}/libpengine.so.*
%{_libdir}/libstonithd.so.*
%{_libdir}/libtransitioner.so.*
%license licenses/LGPLv2.1
%doc COPYING
%doc ChangeLog

%files cluster-libs
%{_libdir}/libcrmcluster.so.*
%license licenses/LGPLv2.1
%doc COPYING
%doc ChangeLog

%files remote
%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%if %{defined _unitdir}
# state directory is shared between the subpackages
# let rpm take care of removing it once it isn't
# referenced anymore and empty
%ghost %dir %{_localstatedir}/lib/rpm-state/%{name}
%{_unitdir}/pacemaker_remote.service
%else
%{_initrddir}/pacemaker_remote
%endif
%{_sbindir}/pacemaker-remoted
%if %{with legacy_links}
%{_sbindir}/pacemaker_remoted
%endif
%{_mandir}/man8/pacemaker-remoted.*
%license licenses/GPLv2
%doc COPYING
%doc ChangeLog

%files doc
%doc %{pcmk_docdir}
%license licenses/CC-BY-SA-4.0

%files cts
%{python_site}/cts
%{_datadir}/pacemaker/tests
%{_libexecdir}/pacemaker/cts-log-watcher
%{_libexecdir}/pacemaker/cts-support
%license licenses/GPLv2
%doc COPYING
%doc ChangeLog

%files libs-devel
%{_includedir}/pacemaker
%{_libdir}/*.so
%if %{with coverage}
%{_var}/lib/pacemaker/gcov
%endif
%{_libdir}/pkgconfig/*.pc
%license licenses/LGPLv2.1
%doc COPYING
%doc ChangeLog

%files schemas
%license licenses/GPLv2
%{_datadir}/pacemaker/*.rng
%{_datadir}/pacemaker/*.xsl

%changelog