diff --git a/GNUmakefile b/GNUmakefile index e79f1c43f2..9a472d96df 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,417 +1,420 @@ # # Copyright 2008-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # default: build .PHONY: default # The toplevel "clean" targets are generated from Makefile.am, not this file. # We can't use autotools' CLEANFILES, clean-local, etc. here. Instead, we # define this target, which Makefile.am can use as a dependency of clean-local. EXTRA_CLEAN_TARGETS = ancillary-clean -include Makefile # The main purpose of this GNUmakefile is that its targets can be invoked # without having to call autogen.sh and configure first. That means automake # variables may or may not be defined. Here, we use the current working # directory if a relevant variable hasn't been defined. # # The idea is to keep generated artifacts in the build tree, in case a VPATH # build is in use, but in practice it would be difficult to make the targets # here usable from a different location than the source tree. abs_srcdir ?= $(shell pwd) abs_builddir ?= $(shell pwd) PACKAGE ?= pacemaker # Definitions that specify what various targets will apply to COMMIT ?= HEAD TAG ?= $(shell T=$$(git describe --tags --exact-match '$(COMMIT)' 2>/dev/null); \ test -n "$${T}" && echo "$${T}" \ || git log --pretty=format:%H -n 1 '$(COMMIT)' 2>/dev/null || echo DIST) lparen = ( rparen = ) SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*|DIST$(rparen) echo '$(TAG)' | cut -c11-;; \ *$(rparen) git log --pretty=format:%h -n 1 '$(TAG)';; esac) SHORTTAG_ABBREV = $(shell printf %s '$(SHORTTAG)' | wc -c) LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1) ifneq ($(origin VERSION), undefined) LAST_RELEASE ?= Pacemaker-$(VERSION) else LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1) endif NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}') # This Makefile can create 2 types of distributions: # # - "make dist" is automake's native functionality, based on the various # dist/nodist make variables; it always uses the current sources # # - "make export" is a custom target based on git archive and relevant entries # from .gitattributes; it defaults to current sources but can use any git tag # # Both types use the TARFILE name for the result, though they generate # different contents. distdir = $(PACKAGE)-$(SHORTTAG) TARFILE = $(abs_builddir)/$(PACKAGE)-$(SHORTTAG).tar.gz .PHONY: init init: test -e $(top_srcdir)/configure || ./autogen.sh test -e $(abs_builddir)/Makefile || $(abs_builddir)/configure .PHONY: build build: init $(MAKE) $(AM_MAKEFLAGS) core export: if [ ! -f "$(TARFILE)" ]; then \ if [ $(TAG) = dirty ]; then \ git commit -m "DO-NOT-PUSH" -a; \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" HEAD^{tree}; \ git reset --mixed HEAD^; \ else \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \ fi; \ echo "`date`: Rebuilt $(TARFILE)"; \ else \ echo "`date`: Using existing tarball: $(TARFILE)"; \ fi ## RPM-related targets # Where to put RPM artifacts; possible values: # # - toplevel (default): RPM sources, spec, and source rpm in top-level build # directory (everything else uses the usual defaults) # # - subtree: RPM sources (i.e. 
TARFILE) in top-level build directory, # everything else in dedicated "rpm" subdirectory of build tree RPMDEST ?= toplevel RPM_SPEC_DIR_toplevel = $(abs_builddir) RPM_SRCRPM_DIR_toplevel = $(abs_builddir) RPM_OPTS_toplevel = --define "_sourcedir $(abs_builddir)" \ --define "_specdir $(RPM_SPEC_DIR_toplevel)" \ --define "_srcrpmdir $(RPM_SRCRPM_DIR_toplevel)" RPM_SPEC_DIR_subtree = $(abs_builddir)/rpm/SPECS RPM_SRCRPM_DIR_subtree = $(abs_builddir)/rpm/SRPMS RPM_OPTS_subtree = --define "_sourcedir $(abs_builddir)" \ --define "_topdir $(abs_builddir)/rpm" RPM_SPEC_DIR = $(RPM_SPEC_DIR_$(RPMDEST)) RPM_SRCRPM_DIR = $(RPM_SRCRPM_DIR_$(RPMDEST)) RPM_OPTS = $(RPM_OPTS_$(RPMDEST)) WITH ?= --without doc BUILD_COUNTER ?= build.counter LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER)) COUNT = $(shell expr 1 + $(LAST_COUNT)) SPECVERSION ?= $(COUNT) MOCK_DIR = $(abs_builddir)/mock MOCK_OPTIONS ?= --resultdir=$(MOCK_DIR) --no-cleanup-after F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora}) ARCH ?= $(shell test ! -e /etc/fedora-release && uname -m; test -e /etc/fedora-release && rpm --eval %{_arch}) MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH)) # rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros # # Unfortunately, at least recent versions of rpm do not support mentioned # switch. To work this around, we can emulate mechanism that rpm uses # internally: unfold the flags into respective macro definitions: # # --with[out] FOO -> --define "_with[out]_FOO --with[out]-FOO" # # $(1) ... WITH string (e.g., --with pre_release --without doc) # $(2) ... options following the initial "rpmbuild" in the command # $(3) ... final arguments determined with $2 (e.g., pacemaker.spec) # # Note that if $(3) is a specfile, extra case is taken so as to reflect # pcmkversion correctly (using in-place modification). 
# # Also note that both ways to specify long option with an argument # (i.e., what getopt and, importantly, rpm itself support) can be used: # # --with FOO # --with=FOO rpmbuild-with = \ WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \ CMD='rpmbuild $(2)'; PREREL=0; \ eval set -- "$${WITH}"; \ while true; do \ case "$$1" in \ --with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \ [ "$$2" != pre_release ] || PREREL=1; shift 2;; \ --without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \ [ "$$2" != pre_release ] || PREREL=0; shift 2;; \ --) shift ; break ;; \ *) echo "cannot parse WITH: $$1"; exit 1;; \ esac; \ done; \ case "$(3)" in \ *.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \ && sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \ || sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \ esac; \ CMD="$${CMD} $(3)"; \ eval "$${CMD}" # Depend on spec-clean so it gets rebuilt every time $(RPM_SPEC_DIR)/$(PACKAGE).spec: spec-clean rpm/pacemaker.spec.in $(AM_V_at)$(MKDIR_P) $(RPM_SPEC_DIR) # might not exist in VPATH build $(AM_V_GEN)if [ x != x"`git ls-files -m rpm/pacemaker.spec.in 2>/dev/null`" ]; then \ cat $(abs_srcdir)/rpm/pacemaker.spec.in; \ elif git cat-file -e $(TAG):rpm/pacemaker.spec.in 2>/dev/null; then \ git show $(TAG):rpm/pacemaker.spec.in; \ elif git cat-file -e $(TAG):pacemaker.spec.in 2>/dev/null; then \ git show $(TAG):pacemaker.spec.in; \ else \ cat $(abs_srcdir)/rpm/pacemaker.spec.in; \ fi | sed \ + -e "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" \ -e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \ -e 's/global\ commit\ .*/global\ commit\ $(SHORTTAG)/' \ -e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SHORTTAG_ABBREV)/' \ -e "s/PACKAGE_DATE/$$(date +'%a %b %d %Y')/" \ -e "s/PACKAGE_VERSION/$$(git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)/" \ > "$@" .PHONY: $(PACKAGE).spec $(PACKAGE).spec: $(RPM_SPEC_DIR)/$(PACKAGE).spec .PHONY: spec-clean spec-clean: -rm -f $(RPM_SPEC_DIR)/$(PACKAGE).spec .PHONY: srpm srpm: export srpm-clean $(RPM_SPEC_DIR)/$(PACKAGE).spec if [ -e $(BUILD_COUNTER) ]; then \ echo $(COUNT) > $(BUILD_COUNTER); \ fi $(call rpmbuild-with,$(WITH),-bs $(RPM_OPTS),$(RPM_SPEC_DIR)/$(PACKAGE).spec) .PHONY: srpm-clean srpm-clean: -rm -f $(RPM_SRCRPM_DIR)/*.src.rpm .PHONY: chroot chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG) @echo "Done" .PHONY: mock-next mock-next: $(MAKE) $(AM_MAKEFLAGS) F=$(shell expr 1 + $(F)) mock .PHONY: mock-rawhide mock-rawhide: $(MAKE) $(AM_MAKEFLAGS) F=rawhide mock mock-install-%: @echo "Installing packages" mock --root=$* $(MOCK_OPTIONS) --install $(MOCK_DIR)/*.rpm \ vi sudo valgrind lcov gdb fence-agents psmisc .PHONY: mock-install mock-install: mock-install-$(MOCK_CFG) @echo "Done" .PHONY: mock-sh mock-sh: mock-sh-$(MOCK_CFG) @echo "Done" mock-sh-%: @echo "Connecting" mock --root=$* $(MOCK_OPTIONS) --shell @echo "Done" mock-%: srpm mock-clean mock $(MOCK_OPTIONS) --root=$* --no-cleanup-after --rebuild \ $(WITH) $(RPM_SRCRPM_DIR)/*.src.rpm .PHONY: mock mock: mock-$(MOCK_CFG) @echo "Done" .PHONY: dirty dirty: $(MAKE) $(AM_MAKEFLAGS) TAG=dirty mock .PHONY: mock-clean mock-clean: -rm -rf $(MOCK_DIR) .PHONY: rpm-dep rpm-dep: $(RPM_SPEC_DIR)/$(PACKAGE).spec sudo yum-builddep $(PACKAGE).spec # e.g. 
make WITH="--with pre_release" rpm .PHONY: rpm rpm: srpm @echo To create custom builds, edit the flags and options in $(PACKAGE).spec first $(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_SRCRPM_DIR)/*.src.rpm) .PHONY: rpmlint rpmlint: $(RPM_SPEC_DIR)/$(PACKAGE).spec rpmlint -f rpm/rpmlintrc "$<" .PHONY: release release: $(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RELEASE) rpm .PHONY: rc rc: $(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RC) rpm ## Static analysis via coverity # Aggressiveness (low, medium, or high) COVLEVEL ?= low # Generated outputs COVERITY_DIR = $(abs_builddir)/coverity-$(TAG) COVTAR = $(abs_builddir)/$(PACKAGE)-coverity-$(TAG).tgz COVEMACS = $(abs_builddir)/$(TAG).coverity COVHTML = $(COVERITY_DIR)/output/errors # Coverity outputs are phony so they get rebuilt every invocation .PHONY: $(COVERITY_DIR) $(COVERITY_DIR): init core-clean coverity-clean $(AM_V_GEN)cov-build --dir "$@" $(MAKE) $(AM_MAKEFLAGS) core # Public coverity instance .PHONY: $(COVTAR) $(COVTAR): $(COVERITY_DIR) $(AM_V_GEN)tar czf "$@" --transform="s@.*$(TAG)@cov-int@" "$<" .PHONY: coverity coverity: $(COVTAR) @echo "Now go to https://scan.coverity.com/users/sign_in and upload:" @echo " $(COVTAR)" @echo "then make core-clean coverity-clean" # Licensed coverity instance # # The prerequisites are a little hacky; rather than actually required, some # of them are designed so that things execute in the proper order (which is # not the same as GNU make's order-only prerequisites). .PHONY: coverity-analyze coverity-analyze: $(COVERITY_DIR) @echo "" @echo "Analyzing (waiting for coverity license if necessary) ..." cov-analyze --dir "$<" --wait-for-license --security \ --aggressiveness-level "$(COVLEVEL)" .PHONY: $(COVEMACS) $(COVEMACS): coverity-analyze $(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@" .PHONY: $(COVHTML) $(COVHTML): $(COVEMACS) $(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@" .PHONY: coverity-corp coverity-corp: $(COVHTML) $(MAKE) $(AM_MAKEFLAGS) core-clean @echo "Done. 
See:" @echo " file://$(COVHTML)/index.html" @echo "When no longer needed, make coverity-clean" # Remove all outputs regardless of tag .PHONY: coverity-clean coverity-clean: -rm -rf "$(abs_builddir)"/coverity-* \ "$(abs_builddir)"/$(PACKAGE)-coverity-*.tgz \ "$(abs_builddir)"/*.coverity ## Change log generation summary: @printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)" @printf "\n- Changesets: `git log --pretty=oneline --no-merges $(LAST_RELEASE)..HEAD | wc -l`" @printf "\n- Diff:\n" @git diff $(LAST_RELEASE)..HEAD --shortstat include lib daemons tools xml rc-changes: @$(MAKE) $(AM_MAKEFLAGS) NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes changes: summary @printf "\n- Features added since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf @printf "\n- Changes since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \ | grep -e High: -e Fix: -e Bug | sed \ -e 's@\(Fix\|High\|Bug\):@@' \ -e 's@\(cib\|pacemaker-based\|based\):@CIB:@' \ -e 's@\(crmd\|pacemaker-controld\|controld\):@controller:@' \ -e 's@\(lrmd\|pacemaker-execd\|execd\):@executor:@' \ -e 's@\(Fencing\|stonithd\|stonith\|pacemaker-fenced\|fenced\):@fencing:@' \ -e 's@\(PE\|pengine\|pacemaker-schedulerd\|schedulerd\):@scheduler:@' \ | sort -uf authors: git log $(LAST_RELEASE)..$(COMMIT) --format='%an' | sort -u changelog: @$(MAKE) $(AM_MAKEFLAGS) changes > ChangeLog @printf "\n">> ChangeLog git show $(LAST_RELEASE):ChangeLog >> ChangeLog DO_NOT_INDENT = lib/gnu daemons/controld/controld_fsa.h indent: find . -name "*.[ch]" -exec ./p-indent \{\} \; git co HEAD $(DO_NOT_INDENT) rel-tags: tags find . 
-name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \; CLANG_analyzer = $(shell which scan-build) CLANG_checkers = # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} # --inconclusive --std=posix CPPCHECK_ARGS ?= cppcheck: - cppcheck $(CPPCHECK_ARGS) -I include --max-configs=25 -q replace lib daemons tools + cppcheck $(CPPCHECK_ARGS) -I include --max-configs=25 \ + --library=posix --library=gnu --library=gtk \ + --inline-suppr -q replace lib daemons tools clang: test -e $(CLANG_analyzer) scan-build $(CLANG_checkers:%=-enable-checker %) $(MAKE) $(AM_MAKEFLAGS) clean all # V3 = scandir unsetenv alphasort xalloc # V2 = setenv strerror strchrnul strndup # https://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import # previously, this was crypto/md5, but got spoiled with streams/kernel crypto GNU_MODS = crypto/md5-buffer # stdint appears to be surrogate only for C99-lacking environments GNU_MODS_AVOID = stdint # only for plain crypto/md5: we make do without kernel-assisted crypto # GNU_MODS_AVOID += crypto/af_alg gnulib-update: -test -e maint/gnulib \ || git clone https://git.savannah.gnu.org/git/gnulib.git maint/gnulib cd maint/gnulib && git pull maint/gnulib/gnulib-tool \ --source-base=lib/gnu --lgpl=2 --no-vc-files --no-conditional-dependencies \ $(GNU_MODS_AVOID:%=--avoid %) --import $(GNU_MODS) ancillary-clean: spec-clean srpm-clean mock-clean coverity-clean -rm -f $(TARFILE) diff --git a/configure.ac b/configure.ac index ecb84a7995..89787ed1ff 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2066 +1,2065 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2020 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) AC_CONFIG_MACRO_DIR([m4]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd GLIB_TESTS dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AC_PROG_CC_STDC AC_PROG_CXX dnl C++ is not needed for build, just maintainer utilities gl_EARLY # We expressly don't want to introduce OpenSSL dependency by default. gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT LT_INIT([dlopen]) LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)], [RC=1; AC_MSG_RESULT(no)]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } dnl =============================================== dnl Configure Options dnl =============================================== dnl --enable-* options AC_ARG_ENABLE([ansi], [AS_HELP_STRING([--enable-ansi], [force GCC to compile to ANSI standard for older compilers. 
@<:@no@:>@])], ) AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@yes@:>@])], ) AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])], ) AC_ARG_ENABLE([no-stack], [AS_HELP_STRING([--enable-no-stack], [build only the scheduler and its requirements @<:@no@:>@])], ) AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart @<:@try@:>@])], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])], [], [enable_systemd=try], ) AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) # By default, we add symlinks at the pre-2.0.0 daemon name locations, so that: # (1) tools that directly invoke those names for metadata etc. will still work # (2) this installation can be used in a bundle container image used with # cluster hosts running Pacemaker 1.1.17+ # If you know your target systems will not have any need for it, you can # disable this option. Once the above use cases are no longer in wide use, we # can disable this option by default, and once we no longer want to support # them at all, we can drop the option altogether. AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names @<:@yes@:>@])], [ LEGACY_LINKS="${enableval}" ], [ LEGACY_LINKS=yes ], ) AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes") dnl --with-* options AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACKAGE_VERSION="$withval" ])] ) VERSION_ARG(VERSION_NUMBER) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer])], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios remote monitoring])], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) AC_ARG_WITH([acl], [AS_HELP_STRING([--with-acl], [support CIB ACL])], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets])], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) SUPPORT_PROFILING=0 
AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations for effective profiling])], [ SUPPORT_PROFILING=$withval ] ) AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations for effective profiling])], [ SUPPORT_COVERAGE=$withval ] ) PUBLICAN_BRAND="common" AC_ARG_WITH([brand], [AS_HELP_STRING([--with-brand=brand], [brand to use for generated documentation (set empty for no docs) @<:@common@:>@])], [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ] ) AC_SUBST(PUBLICAN_BRAND) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], [address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@])], [ BUG_URL="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) dnl The not-yet-released autoconf 2.70 will have a --runstatedir option. dnl Until that's available, emulate it with our own --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) dnl This defaults to /usr/lib rather than libdir because it's determined by the dnl OCF project and not pacemaker. Even if a user wants to install pacemaker to dnl /usr/local or such, the OCF agents will be expected in their usual dnl location. However, we do give the user the option to override it. OCF_ROOT_DIR="/usr/lib/ocf" AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], [OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@/usr/lib/ocf@:>@])], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) dnl Deprecated options AC_ARG_WITH([pkg-name], [AS_HELP_STRING([--with-pkg-name=name], [deprecated and unused (will be removed in a future release)])], ) AC_ARG_WITH([pkgname], [AS_HELP_STRING([--with-pkgname=name], [deprecated and unused (will be removed in a future release)])], ) dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", [Current pacemaker version]) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. 
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) AC_PROG_LN_S AC_PROG_MKDIR_P if cc_supports_flag -Werror; then WERROR="-Werror" else WERROR="" fi # Normalize enable_fatal_warnings (defaulting to yes, when compiler supports it) if test "x${enable_fatal_warnings}" != "xno" ; then if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then enable_fatal_warnings=yes else AC_MSG_NOTICE(Compiler does not support fatal warnings) enable_fatal_warnings=no fi fi INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
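The eval lines that follow peel the nested autoconf defaults one layer at a time; a minimal sketch of the idea with illustrative values (not configure's real state), assuming a POSIX shell:

    # illustrative only: autoconf leaves directory defaults nested like this
    prefix=/usr
    exec_prefix='${prefix}'
    bindir='${exec_prefix}/bin'
    # inner eval prints '${prefix}/bin', outer eval resolves that to '/usr/bin'
    eval bindir="`eval echo ${bindir}`"
    echo "$bindir"    # -> /usr/bin

In configure.ac the same idiom is applied to each directory variable in turn, so by the time a later variable such as bindir is expanded, the variables it references (prefix, exec_prefix) are already literal paths.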
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables if [ test "x${runstatedir}" = "x" ]; then if [ test "x${pcmk_runstatedir}" = "x" ]; then runstatedir="${localstatedir}/run" else runstatedir="${pcmk_runstatedir}" fi fi eval runstatedir="$(eval echo ${runstatedir})" AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) if test x"${CRM_LOG_DIR}" = x""; then CRM_LOG_DIR="${localstatedir}/log/pacemaker" fi AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) if test x"${CRM_BUNDLE_DIR}" = x""; then CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles" fi AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) if test x"${PCMK_GNUTLS_PRIORITIES}" = x""; then AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities]) fi AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) if test x"${BUG_URL}" = x""; then BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker" fi AC_SUBST(BUG_URL) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include ]]) ], [[#define _GNU_SOURCE #include ]]) ], [], [[#include ]]) ]) if test -z "${us_auth}"; then # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([US_AUTH_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([US_AUTH_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ AC_MSG_ERROR([No way to authenticate a Unix socket peer]) ], [[#include ]]) ]) fi dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac # C99 doesn't guarantee uint64_t type and related format specifiers, but # prerequisites, corosync + libqb, use that widely, so the target platforms # are already pre-constrained to those "64bit-clean" (doesn't imply native # bit width) and hence we deliberately refrain from artificial surrogates # (sans manipulation through cached values). AC_CACHE_VAL( [pcmk_cv_decl_inttypes], [ AC_CHECK_DECLS( [PRIu64, PRIu32, PRIx32, SCNu64], [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"], [ # test shall only react on "no" cached result & error out respectively if test "x$ac_cv_have_decl_PRIu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)]) elif test "x$ac_cv_have_decl_PRIu32" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)]) elif test "x$ac_cv_have_decl_PRIx32" = xno; then AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)]) elif test "x$ac_cv_have_decl_SCNu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)]) fi ], [[#include ]] ) ] ) ( set $pcmk_cv_decl_inttypes AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T]) AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T]) AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T]) AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS]) ) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. 
To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path AC_PATH_PROG([PYTHON], [$PYTHON]) case "x$PYTHON" in x*python3*|x*platform-python*) dnl When used with Python 3, Pacemaker requires a minimum of 3.2 AM_PATH_PYTHON([3.2]) ;; *) dnl Otherwise, Pacemaker requires a minimum of 2.7 AM_PATH_PYTHON([2.7]) ;; esac AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([PUBLICAN], [publican]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl BASH is already an environment variable, so use something else AC_PATH_PROG([BASH_PATH], [bash]) PKG_PROG_PKG_CONFIG # PKG_NOARCH_INSTALLDIR not available prior to pkg-config 0.27 and # pkgconf 0.8.10, respectively (next line is to mimic that scenario) dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) m4_ifndef([PKG_NOARCH_INSTALLDIR], [ AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) ]) ]) PKG_NOARCH_INSTALLDIR AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi dnl Bash is needed for building man pages and running regression tests if test x"${BASH_PATH}" = x""; then AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi publican_intree_brand=no if test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""; then dnl special handling for clusterlabs brand (possibly in-tree version used) test "${PUBLICAN_BRAND}" != "clusterlabs" \ || test -d /usr/share/publican/Common_Content/clusterlabs if test $? -ne 0; then dnl Unknown option: brand_dir vs. 
Option brand_dir requires an argument if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common]) PUBLICAN_BRAND=common else publican_intree_brand=yes fi fi AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand]) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi AM_CONDITIONAL([BUILD_DOCBOOK], [test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""]) AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], [test x"${publican_intree_brand}" = x"yes"]) AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x""]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH; do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then $GETOPT_PATH -T >/dev/null 2>/dev/null if test $? -eq 4; then break fi fi GETOPT_PATH="" done IFS=$IFS_orig if test -n "$GETOPT_PATH"; then AC_MSG_RESULT([$GETOPT_PATH]) else AC_MSG_RESULT([no]) AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt) fi AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_FUNCS([sched_setscheduler]) if test "$ac_cv_func_sched_setscheduler" != yes; then PC_LIBS_RT="" else PC_LIBS_RT="-lrt" fi AC_SUBST(PC_LIBS_RT) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi if test x"${PKG_CONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi # Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc. 
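The GLIB check on the following lines can be reproduced from a shell prompt; a rough equivalent of what PKG_CHECK_MODULES does here, assuming pkg-config and the glib-2.0 development files are installed:

    # exits 0 only if the installed glib-2.0 meets the minimum version
    pkg-config --atleast-version=2.16.0 glib-2.0 && echo "glib-2.0 >= 2.16.0 found"
    # the flags that configure appends to CPPFLAGS and LIBS on success
    pkg-config --cflags glib-2.0
    pkg-config --libs glib-2.0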
PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) if test "x${enable_fatal_warnings}" = xyes ; then cc_temp_flags "$CFLAGS $WERROR" fi AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/swab.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/reboot.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/signalfd.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) if test "x${enable_fatal_warnings}" = xyes ; then cc_restore_flags fi dnl These headers need prerequisites before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) AC_CHECK_HEADERS(libxml/xpath.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(libxml development headers not found) fi AC_CHECK_LIB(xslt, xsltApplyStylesheet, [], AC_MSG_ERROR(Unsupported libxslt library version)) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(libxslt development headers not found) fi AC_CACHE_CHECK(whether __progname and __progname_full are available, pf_cv_var_progname, AC_TRY_LINK([extern char *__progname, *__progname_full;], [__progname = "foo"; __progname_full = "foo bar";], pf_cv_var_progname="yes", pf_cv_var_progname="no")) if test "$pf_cv_var_progname" = "yes"; then AC_DEFINE(HAVE___PROGNAME,1,[ ]) fi dnl ======================================================================== dnl Generic declarations dnl ======================================================================== AC_CHECK_DECLS([CLOCK_MONOTONIC], [], [], [[ #include ]]) # the above alone will allow using clock_gettime(CLOCK_MONOTONIC, ...), # but in case there are any discrepancies found with the move onto that # where ftime(3) was originally used -- the callsites make do without # any such timestamp grabbing at this time -- so to revert that effect # (trigger such omission), line below can be uncommented for that intent # (alternatively, propagate equivalent variable assignment from outside) 
#CPPFLAGS="-DPCMK_TIME_EMERGENCY_CGT $CPPFLAGS" dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) -AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include ]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include ]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) AC_CACHE_CHECK(whether sscanf supports %m, pf_cv_var_sscanf, AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include const char *s = "some-command-line-arg"; int main(int argc, char **argv) { char *name = NULL; int n = sscanf(s, "%ms", &name); return n == 1 ? 0 : 1; } ]])], pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) if test "$pf_cv_var_sscanf" = "yes"; then AC_DEFINE(SSCANF_HAS_M, 1, [ ]) fi dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_TRY_COMPILE([#include ],[sighandler_t *f;], has_sighandler_t=yes,has_sighandler_t=no) AC_MSG_RESULT($has_sighandler_t) if test "$has_sighandler_t" = "yes" ; then AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurse takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" cc_temp_flags "-Wcast-qual $WERROR" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [pcmk_cv_compatible_printw=yes], [pcmk_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS cc_restore_flags AC_MSG_RESULT([$pcmk_cv_compatible_printw]) if test "$pcmk_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== if test x${enable_no_stack} = xyes; then SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 0.14.0+ (2012-06) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) 
PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" dnl libqb 0.17.0+ (2014-02) AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) dnl libqb not yet released (as of 2018-05) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS(stonith/stonith.h) if test "$ac_cv_header_stonith_stonith_h" = "yes"; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) if test x"${CRM_DAEMON_USER}" = x""; then CRM_DAEMON_USER="hacluster" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) if test x"${CRM_DAEMON_GROUP}" = x""; then CRM_DAEMON_GROUP="haclient" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) 
RH_STONITH_DIR="$sbindir" AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries) RH_STONITH_PREFIX="fence_" AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"], [HAVE_dbus=0]) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) AC_CHECK_TYPES([DBusBasicValue],,,[[#include ]]) if test $HAVE_dbus = 0; then PC_NAME_DBUS="" else PC_NAME_DBUS="dbus-1" fi AC_SUBST(PC_NAME_DBUS) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without DBus]) else enable_systemd=no fi fi if test $(echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without clock_gettime(CLOCK_MONOTONIC, ...)]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then AC_MSG_CHECKING([for systemd version query result via dbus-send]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) HAVE_systemd=0 if test "x${enable_systemd}" = xyes; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) if test "x${systemdsystemunitdir}" = x""; then AC_MSG_FAILURE([cannot enable systemd when systemdsystemunitdir unresolved]) fi fi AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then AC_MSG_FAILURE([cannot enable Upstart without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then AC_MSG_CHECKING([for Upstart version query result via dbus-send]) ret=$({ dbus-send --system --print-reply 
--dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) HAVE_upstart=0 if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) AC_SUBST(SUPPORT_UPSTART) case $SUPPORT_NAGIOS in 1|yes|true) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then AC_MSG_FAILURE([cannot enable nagios without clock_gettime(CLOCK_MONOTONIC, ...)]) fi SUPPORT_NAGIOS=1 ;; try) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then SUPPORT_NAGIOS=0 else SUPPORT_NAGIOS=1 fi ;; *) SUPPORT_NAGIOS=0 ;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_CS=no ;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS) SUPPORT_CS=1 PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-native" fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AC_SUBST(SUPPORT_COROSYNC) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported, building only the scheduler) PCMK_FEATURES="$PCMK_FEATURES 
no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_ACL=no ;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) SUPPORT_ACL=1 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1 ;; *) SUPPORT_CIBSECRETS=0 ;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) if test $SUPPORT_CIBSECRETS = 1; then PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) fi dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29) AC_CHECK_LIB(gnutls, gnutls_priority_set_direct) if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_FUNCS([gnutls_sec_param_to_pk_bits]) dnl since 2.12.0 (2011-03-24) if test "$ac_cv_header_gnutls_gnutls_h" != "yes"; then PC_NAME_GNUTLS="" else PC_NAME_GNUTLS="gnutls" fi AC_SUBST(PC_NAME_GNUTLS) fi dnl ======================================================================== dnl PAM dnl ======================================================================== AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIMPI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG then 
PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) OPENIPMI_SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) CC_EXTRAS="" if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" else CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags if cc_supports_flag "-Wformat-nonliteral"; then gcc_format_nonliteral=yes else gcc_format_nonliteral=no fi # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). 
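# When push/pull is available, an intentional nonliteral format string in the
# C sources can be shielded locally instead of disabling the warning globally.
# A minimal sketch of that technique (illustrative C only, not a real call site):
#     #pragma GCC diagnostic push
#     #pragma GCC diagnostic ignored "-Wformat-nonliteral"
#     vfprintf(stderr, fmt, ap);
#     #pragma GCC diagnostic pop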
EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char" if test "x$gcc_diagnostic_push_pull" = "xyes"; then AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" else if test "x$gcc_format_nonliteral" = "xyes"; then EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" fi fi # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $CC_EXTRAS $j then CC_EXTRAS="$CC_EXTRAS $j" fi done if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi if test "x${HARDENING}" = "xno"; then AC_MSG_NOTICE([Hardening: explicitly disabled]) elif test "x${HARDENING}" = "xyes" \ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then dnl We'll figure out on our own... CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. 
libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1]) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1]) fi # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1]) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
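dnl As a hypothetical illustration of the failure being avoided: with -Werror
dnl already active, a probe along the lines of
dnl     AC_CHECK_FUNCS([some_function])
dnl could fail to compile its conftest because of an otherwise harmless
dnl warning, and the cached answer would silently become "no".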
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS $WERROR" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant fi AC_MSG_RESULT(Suppress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py]) AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy]) AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py]) AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test]) AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts]) AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli]) AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage]) AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec]) AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing]) AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher]) AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression]) AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler]) AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support]) AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh]) AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench]) AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) AC_CONFIG_FILES([extra/resources/SysInfo], [chmod +x extra/resources/SysInfo]) AC_CONFIG_FILES([extra/resources/ifspeed], [chmod +x extra/resources/ifspeed]) AC_CONFIG_FILES([extra/resources/o2cb], [chmod +x extra/resources/o2cb]) AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount]) AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master]) AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report]) AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby]) AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret]) AC_CONFIG_FILES([tools/pcmk_simtimes], [chmod +x tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ 
daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/Clusters_from_Scratch/publican.cfg \ doc/Pacemaker_Administration/publican.cfg \ doc/Pacemaker_Development/publican.cfg \ doc/Pacemaker_Explained/publican.cfg \ doc/Pacemaker_Remote/publican.cfg \ doc/sphinx/Makefile \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/rules/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tests/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_RESULT([ Unix socket auth method = ${us_auth}]) diff --git a/daemons/based/based_callbacks.c b/daemons/based/based_callbacks.c index ed07105c07..67597fb863 100644 --- a/daemons/based/based_callbacks.c +++ b/daemons/based/based_callbacks.c @@ -1,1548 +1,1550 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include /* U64T ~ PRIu64 */ #include #include #include #include #include #include #include #define EXIT_ESCALATION_MS 10000 static unsigned long cib_local_bcast_num = 0; typedef struct cib_local_notify_s { xmlNode *notify_src; char *client_id; gboolean from_peer; gboolean sync_reply; } cib_local_notify_t; int next_client_id = 0; gboolean legacy_mode = FALSE; qb_ipcs_service_t *ipcs_ro = NULL; qb_ipcs_service_t *ipcs_rw = NULL; qb_ipcs_service_t *ipcs_shm = NULL; void send_cib_replace(const xmlNode * sync_request, const char *host); static void cib_process_request(xmlNode* request, gboolean force_synchronous, gboolean privileged, pcmk__client_t *cib_client); static int cib_process_command(xmlNode *request, xmlNode **reply, xmlNode **cib_diff, gboolean privileged); gboolean cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean privileged); gboolean cib_legacy_mode(void) { return legacy_mode; } static int32_t cib_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { if (cib_shutdown_flag) { crm_info("Ignoring new IPC client [%d] during shutdown", pcmk__client_pid(c)); return -EPERM; } if (pcmk__new_client(c, uid, gid) == NULL) { return -EIO; } return 0; } static int32_t cib_ipc_dispatch_rw(qb_ipcs_connection_t * c, void *data, size_t size) { pcmk__client_t *client = pcmk__find_client(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, TRUE); } static int32_t cib_ipc_dispatch_ro(qb_ipcs_connection_t * c, void *data, size_t size) { pcmk__client_t *client = pcmk__find_client(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, FALSE); } /* Error code means? */ static int32_t cib_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); pcmk__free_client(client); return 0; } static void cib_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); cib_ipc_closed(c); if (cib_shutdown_flag) { cib_shutdown(0); } } struct qb_ipcs_service_handlers ipc_ro_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = NULL, .msg_process = cib_ipc_dispatch_ro, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; struct qb_ipcs_service_handlers ipc_rw_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = NULL, .msg_process = cib_ipc_dispatch_rw, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, pcmk__client_t *cib_client, gboolean privileged) { const char *op = crm_element_value(op_request, F_CIB_OPERATION); if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { if (flags & crm_ipc_client_response) { xmlNode *ack = create_xml_node(NULL, __FUNCTION__); crm_xml_add(ack, F_CIB_OPERATION, CRM_OP_REGISTER); crm_xml_add(ack, F_CIB_CLIENTID, cib_client->id); pcmk__ipc_send_xml(cib_client, id, ack, flags); cib_client->request_id = 0; free_xml(ack); } return; } else if (crm_str_eq(op, T_CIB_NOTIFY, TRUE)) { /* Update the notify filters for this client */ int on_off = 0; long long bit = 0; const char *type = crm_element_value(op_request, F_CIB_NOTIFY_TYPE); crm_element_value_int(op_request, F_CIB_NOTIFY_ACTIVATE, &on_off); crm_debug("Setting %s callbacks for %s (%s): %s", type, cib_client->name, cib_client->id, on_off ? 
"on" : "off"); if (safe_str_eq(type, T_CIB_POST_NOTIFY)) { bit = cib_notify_post; } else if (safe_str_eq(type, T_CIB_PRE_NOTIFY)) { bit = cib_notify_pre; } else if (safe_str_eq(type, T_CIB_UPDATE_CONFIRM)) { bit = cib_notify_confirm; } else if (safe_str_eq(type, T_CIB_DIFF_NOTIFY)) { bit = cib_notify_diff; } else if (safe_str_eq(type, T_CIB_REPLACE_NOTIFY)) { bit = cib_notify_replace; } if (on_off) { set_bit(cib_client->options, bit); } else { clear_bit(cib_client->options, bit); } if (flags & crm_ipc_client_response) { /* TODO - include rc */ pcmk__ipc_send_ack(cib_client, id, flags, "ack"); } return; } cib_process_request(op_request, FALSE, privileged, cib_client); } int32_t cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean privileged) { uint32_t id = 0; uint32_t flags = 0; int call_options = 0; pcmk__client_t *cib_client = pcmk__find_client(c); xmlNode *op_request = pcmk__client_data2xml(cib_client, data, &id, &flags); if (op_request) { crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options); } if (op_request == NULL) { crm_trace("Invalid message from %p", c); pcmk__ipc_send_ack(cib_client, id, flags, "nack"); return 0; } else if(cib_client == NULL) { crm_trace("Invalid client %p", c); return 0; } if (is_set(call_options, cib_sync_call)) { CRM_LOG_ASSERT(flags & crm_ipc_client_response); CRM_LOG_ASSERT(cib_client->request_id == 0); /* This means the client has two synchronous events in-flight */ cib_client->request_id = id; /* Reply only to the last one */ } if (cib_client->name == NULL) { const char *value = crm_element_value(op_request, F_CIB_CLIENTNAME); if (value == NULL) { cib_client->name = crm_itoa(cib_client->pid); } else { cib_client->name = strdup(value); if (crm_is_daemon_name(value)) { set_bit(cib_client->options, cib_is_daemon); } } } /* Allow cluster daemons more leeway before being evicted */ if (is_set(cib_client->options, cib_is_daemon)) { const char *qmax = cib_config_lookup("cluster-ipc-limit"); if (pcmk__set_client_queue_max(cib_client, qmax)) { crm_trace("IPC threshold for %s[%u] is now %u", cib_client->name, cib_client->pid, cib_client->queue_max); } } crm_xml_add(op_request, F_CIB_CLIENTID, cib_client->id); crm_xml_add(op_request, F_CIB_CLIENTNAME, cib_client->name); #if ENABLE_ACL CRM_LOG_ASSERT(cib_client->user != NULL); pcmk__update_acl_user(op_request, F_CIB_USER, cib_client->user); #endif cib_common_callback_worker(id, flags, op_request, cib_client, privileged); free_xml(op_request); return 0; } static uint64_t ping_seq = 0; static char *ping_digest = NULL; static bool ping_modified_since = FALSE; int sync_our_cib(xmlNode * request, gboolean all); static gboolean cib_digester_cb(gpointer data) { if (cib_is_master) { char buffer[32]; xmlNode *ping = create_xml_node(NULL, "ping"); ping_seq++; free(ping_digest); ping_digest = NULL; ping_modified_since = FALSE; snprintf(buffer, 32, "%" U64T, ping_seq); crm_trace("Requesting peer digests (%s)", buffer); crm_xml_add(ping, F_TYPE, "cib"); crm_xml_add(ping, F_CIB_OPERATION, CRM_OP_PING); crm_xml_add(ping, F_CIB_PING_ID, buffer); crm_xml_add(ping, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); send_cluster_message(NULL, crm_msg_cib, ping, TRUE); free_xml(ping); } return FALSE; } static void process_ping_reply(xmlNode *reply) { uint64_t seq = 0; const char *host = crm_element_value(reply, F_ORIG); xmlNode *pong = get_message_xml(reply, F_CIB_CALLDATA); const char *seq_s = crm_element_value(pong, F_CIB_PING_ID); const char *digest = crm_element_value(pong, XML_ATTR_DIGEST); if (seq_s) { 
seq = (uint64_t) crm_parse_ll(seq_s, NULL); } if(digest == NULL) { crm_trace("Ignoring ping reply %s from %s with no digest", seq_s, host); } else if(seq != ping_seq) { crm_trace("Ignoring out of sequence ping reply %s from %s", seq_s, host); } else if(ping_modified_since) { crm_trace("Ignoring ping reply %s from %s: cib updated since", seq_s, host); } else { const char *version = crm_element_value(pong, XML_ATTR_CRM_VERSION); if(ping_digest == NULL) { crm_trace("Calculating new digest"); ping_digest = calculate_xml_versioned_digest(the_cib, FALSE, TRUE, version); } crm_trace("Processing ping reply %s from %s (%s)", seq_s, host, digest); if(safe_str_eq(ping_digest, digest) == FALSE) { xmlNode *remote_cib = get_message_xml(pong, F_CIB_CALLDATA); crm_notice("Local CIB %s.%s.%s.%s differs from %s: %s.%s.%s.%s %p", crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN), crm_element_value(the_cib, XML_ATTR_GENERATION), crm_element_value(the_cib, XML_ATTR_NUMUPDATES), ping_digest, host, remote_cib?crm_element_value(remote_cib, XML_ATTR_GENERATION_ADMIN):"_", remote_cib?crm_element_value(remote_cib, XML_ATTR_GENERATION):"_", remote_cib?crm_element_value(remote_cib, XML_ATTR_NUMUPDATES):"_", digest, remote_cib); if(remote_cib && remote_cib->children) { /* Additional debug */ xml_calculate_changes(the_cib, remote_cib); xml_log_changes(LOG_INFO, __FUNCTION__, remote_cib); crm_trace("End of differences"); } free_xml(remote_cib); sync_our_cib(reply, FALSE); } } } static void do_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { int rid = 0; int call_id = 0; pcmk__client_t *client_obj = NULL; CRM_ASSERT(notify_src && client_id); crm_element_value_int(notify_src, F_CIB_CALLID, &call_id); client_obj = pcmk__find_client_by_id(client_id); if (client_obj == NULL) { crm_debug("Could not send response %d: client %s not found", call_id, client_id); return; } if (sync_reply) { if (client_obj->ipcs) { CRM_LOG_ASSERT(client_obj->request_id); rid = client_obj->request_id; client_obj->request_id = 0; crm_trace("Sending response %d to %s %s", rid, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } else { crm_trace("Sending response [call %d] to %s %s", call_id, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } } else { crm_trace("Sending event %d to %s %s", call_id, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } switch (client_obj->kind) { case PCMK__CLIENT_IPC: { int rc = pcmk__ipc_send_xml(client_obj, rid, notify_src, (sync_reply? crm_ipc_flags_none : crm_ipc_server_event)); if (rc != pcmk_rc_ok) { crm_warn("%s reply to %s failed: %s " CRM_XS " rc=%d", (sync_reply? 
"Synchronous" : "Asynchronous"), client_obj->name, pcmk_rc_str(rc), rc); } } break; #ifdef HAVE_GNUTLS_GNUTLS_H case PCMK__CLIENT_TLS: #endif case PCMK__CLIENT_TCP: pcmk__remote_send_xml(client_obj->remote, notify_src); break; default: crm_err("Unknown transport %d for %s", client_obj->kind, client_obj->name); } } static void local_notify_destroy_callback(gpointer data) { cib_local_notify_t *notify = data; free_xml(notify->notify_src); free(notify->client_id); free(notify); } static void check_local_notify(int bcast_id) { cib_local_notify_t *notify = NULL; if (!local_notify_queue) { return; } notify = g_hash_table_lookup(local_notify_queue, GINT_TO_POINTER(bcast_id)); if (notify) { do_local_notify(notify->notify_src, notify->client_id, notify->sync_reply, notify->from_peer); g_hash_table_remove(local_notify_queue, GINT_TO_POINTER(bcast_id)); } } static void queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { cib_local_notify_t *notify = calloc(1, sizeof(cib_local_notify_t)); notify->notify_src = notify_src; notify->client_id = strdup(client_id); notify->sync_reply = sync_reply; notify->from_peer = from_peer; if (!local_notify_queue) { local_notify_queue = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, local_notify_destroy_callback); } + // notify will get freed when the hash table is destroyed + // cppcheck-suppress memleak g_hash_table_insert(local_notify_queue, GINT_TO_POINTER(cib_local_bcast_num), notify); } static void parse_local_options_v1(pcmk__client_t *cib_client, int call_type, int call_options, const char *host, const char *op, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { if (cib_op_modifies(call_type) && !(call_options & cib_inhibit_bcast)) { /* we need to send an update anyway */ *needs_reply = TRUE; } else { *needs_reply = FALSE; } if (host == NULL && (call_options & cib_scope_local)) { crm_trace("Processing locally scoped %s op from %s", op, cib_client->name); *local_notify = TRUE; } else if (host == NULL && cib_is_master) { crm_trace("Processing master %s op locally from %s", op, cib_client->name); *local_notify = TRUE; } else if (safe_str_eq(host, cib_our_uname)) { crm_trace("Processing locally addressed %s op from %s", op, cib_client->name); *local_notify = TRUE; } else if (stand_alone) { *needs_forward = FALSE; *local_notify = TRUE; *process = TRUE; } else { crm_trace("%s op from %s needs to be forwarded to %s", op, cib_client->name, host ? host : "the master instance"); *needs_forward = TRUE; *process = FALSE; } } static void parse_local_options_v2(pcmk__client_t *cib_client, int call_type, int call_options, const char *host, const char *op, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { if (cib_op_modifies(call_type)) { if(safe_str_eq(op, CIB_OP_MASTER) || safe_str_eq(op, CIB_OP_SLAVE)) { /* Always handle these locally */ *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; return; } else { /* Redirect all other updates via CPG */ *needs_reply = TRUE; *needs_forward = TRUE; *process = FALSE; crm_trace("%s op from %s needs to be forwarded to %s", op, cib_client->name, host ? 
host : "the master instance"); return; } } *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; if (stand_alone) { crm_trace("Processing %s op from %s (stand-alone)", op, cib_client->name); } else if (host == NULL) { crm_trace("Processing unaddressed %s op from %s", op, cib_client->name); } else if (safe_str_eq(host, cib_our_uname)) { crm_trace("Processing locally addressed %s op from %s", op, cib_client->name); } else { crm_trace("%s op from %s needs to be forwarded to %s", op, cib_client->name, host); *needs_forward = TRUE; *process = FALSE; } } static void parse_local_options(pcmk__client_t *cib_client, int call_type, int call_options, const char *host, const char *op, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { if(cib_legacy_mode()) { parse_local_options_v1(cib_client, call_type, call_options, host, op, local_notify, needs_reply, process, needs_forward); } else { parse_local_options_v2(cib_client, call_type, call_options, host, op, local_notify, needs_reply, process, needs_forward); } } static gboolean parse_peer_options_v1(int call_type, xmlNode * request, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { const char *op = NULL; const char *host = NULL; const char *delegated = NULL; const char *originator = crm_element_value(request, F_ORIG); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); const char *update = crm_element_value(request, F_CIB_GLOBAL_UPDATE); gboolean is_reply = safe_str_eq(reply_to, cib_our_uname); if (crm_is_true(update)) { *needs_reply = FALSE; if (is_reply) { *local_notify = TRUE; crm_trace("Processing global/peer update from %s" " that originated from us", originator); } else { crm_trace("Processing global/peer update from %s", originator); } return TRUE; } op = crm_element_value(request, F_CIB_OPERATION); crm_trace("Processing %s request sent by %s", op, originator); if (safe_str_eq(op, "cib_shutdown_req")) { /* Always process these */ *local_notify = FALSE; if (reply_to == NULL || is_reply) { *process = TRUE; } if (is_reply) { *needs_reply = FALSE; } return *process; } if (is_reply && safe_str_eq(op, CRM_OP_PING)) { process_ping_reply(request); return FALSE; } if (is_reply) { crm_trace("Forward reply sent from %s to local clients", originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } host = crm_element_value(request, F_CIB_HOST); if (host != NULL && safe_str_eq(host, cib_our_uname)) { crm_trace("Processing %s request sent to us from %s", op, originator); return TRUE; } else if(is_reply == FALSE && safe_str_eq(op, CRM_OP_PING)) { crm_trace("Processing %s request sent to %s by %s", op, host?host:"everyone", originator); *needs_reply = TRUE; return TRUE; } else if (host == NULL && cib_is_master == TRUE) { crm_trace("Processing %s request sent to master instance from %s", op, originator); return TRUE; } delegated = crm_element_value(request, F_CIB_DELEGATED); if (delegated != NULL) { crm_trace("Ignoring msg for master instance"); } else if (host != NULL) { /* this is for a specific instance and we're not it */ crm_trace("Ignoring msg for instance on %s", crm_str(host)); } else if (reply_to == NULL && cib_is_master == FALSE) { /* this is for the master instance and we're not it */ crm_trace("Ignoring reply to %s", crm_str(reply_to)); } else if (safe_str_eq(op, "cib_shutdown_req")) { if (reply_to != NULL) { crm_debug("Processing %s from %s", op, originator); *needs_reply = FALSE; } else { 
crm_debug("Processing %s reply from %s", op, originator); } return TRUE; } else { crm_err("Nothing for us to do?"); crm_log_xml_err(request, "Peer[inbound]"); } return FALSE; } static gboolean parse_peer_options_v2(int call_type, xmlNode * request, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { const char *host = NULL; const char *delegated = crm_element_value(request, F_CIB_DELEGATED); const char *op = crm_element_value(request, F_CIB_OPERATION); const char *originator = crm_element_value(request, F_ORIG); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); const char *update = crm_element_value(request, F_CIB_GLOBAL_UPDATE); gboolean is_reply = safe_str_eq(reply_to, cib_our_uname); if(safe_str_eq(op, CIB_OP_REPLACE)) { /* sync_our_cib() sets F_CIB_ISREPLY */ if (reply_to) { delegated = reply_to; } goto skip_is_reply; } else if(safe_str_eq(op, CIB_OP_SYNC)) { } else if (is_reply && safe_str_eq(op, CRM_OP_PING)) { process_ping_reply(request); return FALSE; } else if (safe_str_eq(op, CIB_OP_UPGRADE)) { /* Only the DC (node with the oldest software) should process * this operation if F_CIB_SCHEMA_MAX is unset * * If the DC is happy it will then send out another * CIB_OP_UPGRADE which will tell all nodes to do the actual * upgrade. * * Except this time F_CIB_SCHEMA_MAX will be set which puts a * limit on how far newer nodes will go */ const char *max = crm_element_value(request, F_CIB_SCHEMA_MAX); const char *upgrade_rc = crm_element_value(request, F_CIB_UPGRADE_RC); crm_trace("Parsing %s operation%s for %s with max=%s and upgrade_rc=%s", op, (is_reply? " reply" : ""), (cib_is_master? "master" : "slave"), (max? max : "none"), (upgrade_rc? upgrade_rc : "none")); if (upgrade_rc != NULL) { // Our upgrade request was rejected by DC, notify clients of result crm_xml_add(request, F_CIB_RC, upgrade_rc); } else if ((max == NULL) && cib_is_master) { /* We are the DC, check if this upgrade is allowed */ goto skip_is_reply; } else if(max) { /* Ok, go ahead and upgrade to 'max' */ goto skip_is_reply; } else { // Ignore broadcast client requests when we're not DC return FALSE; } } else if (crm_is_true(update)) { crm_info("Detected legacy %s global update from %s", op, originator); send_sync_request(NULL); legacy_mode = TRUE; return FALSE; } else if (is_reply && cib_op_modifies(call_type)) { crm_trace("Ignoring legacy %s reply sent from %s to local clients", op, originator); return FALSE; } else if (safe_str_eq(op, "cib_shutdown_req")) { /* Legacy handling */ crm_debug("Legacy handling of %s message from %s", op, originator); *local_notify = FALSE; if (reply_to == NULL) { *process = TRUE; } return *process; } if(is_reply) { crm_trace("Handling %s reply sent from %s to local clients", op, originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } skip_is_reply: *process = TRUE; *needs_reply = FALSE; if(safe_str_eq(delegated, cib_our_uname)) { *local_notify = TRUE; } else { *local_notify = FALSE; } host = crm_element_value(request, F_CIB_HOST); if (host != NULL && safe_str_eq(host, cib_our_uname)) { crm_trace("Processing %s request sent to us from %s", op, originator); *needs_reply = TRUE; return TRUE; } else if (host != NULL) { /* this is for a specific instance and we're not it */ crm_trace("Ignoring %s operation for instance on %s", op, crm_str(host)); return FALSE; } else if(is_reply == FALSE && safe_str_eq(op, CRM_OP_PING)) { *needs_reply = TRUE; } crm_trace("Processing %s request sent to everyone by %s/%s on 
%s %s", op, crm_element_value(request, F_CIB_CLIENTNAME), crm_element_value(request, F_CIB_CALLID), originator, (*local_notify)?"(notify)":""); return TRUE; } static gboolean parse_peer_options(int call_type, xmlNode * request, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { /* TODO: What happens when an update comes in after node A * requests the CIB from node B, but before it gets the reply (and * sends out the replace operation) */ if(cib_legacy_mode()) { return parse_peer_options_v1( call_type, request, local_notify, needs_reply, process, needs_forward); } else { return parse_peer_options_v2( call_type, request, local_notify, needs_reply, process, needs_forward); } } static void forward_request(xmlNode * request, pcmk__client_t *cib_client, int call_options) { const char *op = crm_element_value(request, F_CIB_OPERATION); const char *host = crm_element_value(request, F_CIB_HOST); crm_xml_add(request, F_CIB_DELEGATED, cib_our_uname); if (host != NULL) { crm_trace("Forwarding %s op to %s", op, host); send_cluster_message(crm_get_peer(0, host), crm_msg_cib, request, FALSE); } else { crm_trace("Forwarding %s op to master instance", op); send_cluster_message(NULL, crm_msg_cib, request, FALSE); } /* Return the request to its original state */ xml_remove_prop(request, F_CIB_DELEGATED); if (call_options & cib_discard_reply) { crm_trace("Client not interested in reply"); } } static gboolean send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gboolean broadcast) { CRM_ASSERT(msg != NULL); if (broadcast) { /* this (successful) call modified the CIB _and_ the * change needs to be broadcast... * send via HA to other nodes */ int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; const char *digest = NULL; int format = 1; CRM_LOG_ASSERT(result_diff != NULL); digest = crm_element_value(result_diff, XML_ATTR_DIGEST); crm_element_value_int(result_diff, "format", &format); cib_diff_version_details(result_diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_trace("Sending update diff %d.%d.%d -> %d.%d.%d %s", diff_del_admin_epoch, diff_del_epoch, diff_del_updates, diff_add_admin_epoch, diff_add_epoch, diff_add_updates, digest); crm_xml_add(msg, F_CIB_ISREPLY, originator); crm_xml_add(msg, F_CIB_GLOBAL_UPDATE, XML_BOOLEAN_TRUE); crm_xml_add(msg, F_CIB_OPERATION, CIB_OP_APPLY_DIFF); crm_xml_add(msg, F_CIB_USER, CRM_DAEMON_USER); if (format == 1) { CRM_ASSERT(digest != NULL); } add_message_xml(msg, F_CIB_UPDATE_DIFF, result_diff); crm_log_xml_explicit(msg, "copy"); return send_cluster_message(NULL, crm_msg_cib, msg, TRUE); } else if (originator != NULL) { /* send reply via HA to originating node */ crm_trace("Sending request result to %s only", originator); crm_xml_add(msg, F_CIB_ISREPLY, originator); return send_cluster_message(crm_get_peer(0, originator), crm_msg_cib, msg, FALSE); } return FALSE; } static void cib_process_request(xmlNode *request, gboolean force_synchronous, gboolean privileged, pcmk__client_t *cib_client) { int call_type = 0; int call_options = 0; gboolean process = TRUE; gboolean is_update = TRUE; gboolean from_peer = TRUE; gboolean needs_reply = TRUE; gboolean local_notify = FALSE; gboolean needs_forward = FALSE; gboolean global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE)); xmlNode *op_reply = NULL; xmlNode 
*result_diff = NULL; int rc = pcmk_ok; const char *op = crm_element_value(request, F_CIB_OPERATION); const char *originator = crm_element_value(request, F_ORIG); const char *host = crm_element_value(request, F_CIB_HOST); const char *target = NULL; const char *call_id = crm_element_value(request, F_CIB_CALLID); const char *client_id = crm_element_value(request, F_CIB_CLIENTID); const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); if (cib_client) { from_peer = FALSE; } crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); if (force_synchronous) { call_options |= cib_sync_call; } if (host != NULL && strlen(host) == 0) { host = NULL; } if (host) { target = host; } else if (call_options & cib_scope_local) { target = "local host"; } else { target = "master"; } if (from_peer) { crm_trace("Processing peer %s operation from %s/%s on %s intended for %s (reply=%s)", op, client_name, call_id, originator, target, reply_to); } else { crm_xml_add(request, F_ORIG, cib_our_uname); crm_trace("Processing local %s operation from %s/%s intended for %s", op, client_name, call_id, target); } rc = cib_get_operation_id(op, &call_type); if (rc != pcmk_ok) { /* TODO: construct error reply? */ crm_err("Pre-processing of command failed: %s", pcmk_strerror(rc)); return; } if (from_peer == FALSE) { parse_local_options(cib_client, call_type, call_options, host, op, &local_notify, &needs_reply, &process, &needs_forward); } else if (parse_peer_options(call_type, request, &local_notify, &needs_reply, &process, &needs_forward) == FALSE) { return; } is_update = cib_op_modifies(call_type); if (call_options & cib_discard_reply) { needs_reply = is_update; local_notify = FALSE; } if (needs_forward) { const char *host = crm_element_value(request, F_CIB_HOST); const char *section = crm_element_value(request, F_CIB_SECTION); int log_level = LOG_INFO; if (safe_str_eq(op, CRM_OP_NOOP)) { log_level = LOG_DEBUG; } do_crm_log(log_level, "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)", op, section ? section : "'all'", host ? host : cib_legacy_mode() ? "master" : "all", originator ? originator : "local", client_name, call_id); forward_request(request, cib_client, call_options); return; } if (cib_status != pcmk_ok) { const char *call = crm_element_value(request, F_CIB_CALLID); rc = cib_status; crm_err("Operation ignored, cluster configuration is invalid." 
" Please repair and restart: %s", pcmk_strerror(cib_status)); op_reply = create_xml_node(NULL, "cib-reply"); crm_xml_add(op_reply, F_TYPE, T_CIB); crm_xml_add(op_reply, F_CIB_OPERATION, op); crm_xml_add(op_reply, F_CIB_CALLID, call); crm_xml_add(op_reply, F_CIB_CLIENTID, client_id); crm_xml_add_int(op_reply, F_CIB_CALLOPTS, call_options); crm_xml_add_int(op_reply, F_CIB_RC, rc); crm_trace("Attaching reply output"); add_message_xml(op_reply, F_CIB_CALLDATA, the_cib); crm_log_xml_explicit(op_reply, "cib:reply"); } else if (process) { time_t finished = 0; time_t now = time(NULL); int level = LOG_INFO; const char *section = crm_element_value(request, F_CIB_SECTION); rc = cib_process_command(request, &op_reply, &result_diff, privileged); if (is_update == FALSE) { level = LOG_TRACE; } else if (global_update) { switch (rc) { case pcmk_ok: level = LOG_INFO; break; case -pcmk_err_old_data: case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: level = LOG_TRACE; break; default: level = LOG_ERR; } } else if (rc != pcmk_ok && is_update) { level = LOG_WARNING; } do_crm_log(level, "Completed %s operation for section %s: %s (rc=%d, origin=%s/%s/%s, version=%s.%s.%s)", op, section ? section : "'all'", pcmk_strerror(rc), rc, originator ? originator : "local", client_name, call_id, the_cib ? crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN) : "0", the_cib ? crm_element_value(the_cib, XML_ATTR_GENERATION) : "0", the_cib ? crm_element_value(the_cib, XML_ATTR_NUMUPDATES) : "0"); finished = time(NULL); if ((finished - now) > 3) { crm_trace("%s operation took %lds to complete", op, (long)(finished - now)); crm_write_blackbox(0, NULL); } if (op_reply == NULL && (needs_reply || local_notify)) { crm_err("Unexpected NULL reply to message"); crm_log_xml_err(request, "null reply"); needs_reply = FALSE; local_notify = FALSE; } } /* from now on we are the server */ if(is_update && cib_legacy_mode() == FALSE) { crm_trace("Completed pre-sync update from %s/%s/%s%s", originator ? originator : "local", client_name, call_id, local_notify?" with local notification":""); } else if (needs_reply == FALSE || stand_alone) { /* nothing more to do... * this was a non-originating slave update */ crm_trace("Completed slave update"); } else if (cib_legacy_mode() && rc == pcmk_ok && result_diff != NULL && !(call_options & cib_inhibit_bcast)) { gboolean broadcast = FALSE; cib_local_bcast_num++; crm_xml_add_int(request, F_CIB_LOCAL_NOTIFY_ID, cib_local_bcast_num); broadcast = send_peer_reply(request, result_diff, originator, TRUE); if (broadcast && client_id && local_notify && op_reply) { /* If we have been asked to sync the reply, * and a bcast msg has gone out, we queue the local notify * until we know the bcast message has been received */ local_notify = FALSE; crm_trace("Queuing local %ssync notification for %s", (call_options & cib_sync_call) ? 
"" : "a-", client_id); queue_local_notify(op_reply, client_id, (call_options & cib_sync_call), from_peer); op_reply = NULL; /* the reply is queued, so don't free here */ } } else if (call_options & cib_discard_reply) { crm_trace("Caller isn't interested in reply"); } else if (from_peer) { if (is_update == FALSE || result_diff == NULL) { crm_trace("Request not broadcast: R/O call"); } else if (call_options & cib_inhibit_bcast) { crm_trace("Request not broadcast: inhibited"); } else if (rc != pcmk_ok) { crm_trace("Request not broadcast: call failed: %s", pcmk_strerror(rc)); } else { crm_trace("Directing reply to %s", originator); } send_peer_reply(op_reply, result_diff, originator, FALSE); } if (local_notify && client_id) { crm_trace("Performing local %ssync notification for %s", (call_options & cib_sync_call) ? "" : "a-", client_id); if (process == FALSE) { do_local_notify(request, client_id, call_options & cib_sync_call, from_peer); } else { do_local_notify(op_reply, client_id, call_options & cib_sync_call, from_peer); } } free_xml(op_reply); free_xml(result_diff); return; } static int cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gboolean privileged) { xmlNode *input = NULL; xmlNode *output = NULL; xmlNode *result_cib = NULL; xmlNode *current_cib = NULL; int call_type = 0; int call_options = 0; const char *op = NULL; const char *section = NULL; const char *call_id = crm_element_value(request, F_CIB_CALLID); int rc = pcmk_ok; int rc2 = pcmk_ok; gboolean send_r_notify = FALSE; gboolean global_update = FALSE; gboolean config_changed = FALSE; gboolean manage_counters = TRUE; static mainloop_timer_t *digest_timer = NULL; CRM_ASSERT(cib_status == pcmk_ok); if(digest_timer == NULL) { digest_timer = mainloop_timer_add("digester", 5000, FALSE, cib_digester_cb, NULL); } *reply = NULL; *cib_diff = NULL; current_cib = the_cib; /* Start processing the request... 
*/ op = crm_element_value(request, F_CIB_OPERATION); crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); rc = cib_get_operation_id(op, &call_type); if (rc == pcmk_ok && privileged == FALSE) { rc = cib_op_can_run(call_type, call_options, privileged, global_update); } rc2 = cib_op_prepare(call_type, request, &input, §ion); if (rc == pcmk_ok) { rc = rc2; } if (rc != pcmk_ok) { crm_trace("Call setup failed: %s", pcmk_strerror(rc)); goto done; } else if (cib_op_modifies(call_type) == FALSE) { rc = cib_perform_op(op, call_options, cib_op_func(call_type), TRUE, section, request, input, FALSE, &config_changed, current_cib, &result_cib, NULL, &output); CRM_CHECK(result_cib == NULL, free_xml(result_cib)); goto done; } /* Handle a valid write action */ global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE)); if (global_update) { /* legacy code */ manage_counters = FALSE; call_options |= cib_force_diff; crm_trace("Global update detected"); CRM_CHECK(call_type == 3 || call_type == 4, crm_err("Call type: %d", call_type); crm_log_xml_err(request, "bad op")); } if (rc == pcmk_ok) { ping_modified_since = TRUE; if (call_options & cib_inhibit_bcast) { /* skip */ crm_trace("Skipping update: inhibit broadcast"); manage_counters = FALSE; } if (is_not_set(call_options, cib_dryrun) && safe_str_eq(section, XML_CIB_TAG_STATUS)) { /* Copying large CIBs accounts for a huge percentage of our CIB usage */ call_options |= cib_zero_copy; } else { clear_bit(call_options, cib_zero_copy); } /* result_cib must not be modified after cib_perform_op() returns */ rc = cib_perform_op(op, call_options, cib_op_func(call_type), FALSE, section, request, input, manage_counters, &config_changed, current_cib, &result_cib, cib_diff, &output); if (manage_counters == FALSE) { int format = 1; /* Legacy code * If the diff is NULL at this point, it's because nothing changed */ if (*cib_diff) { crm_element_value_int(*cib_diff, "format", &format); } if (format == 1) { config_changed = cib_config_changed(NULL, NULL, cib_diff); } } /* Always write to disk for replace ops, * this also negates the need to detect ordering changes */ if (crm_str_eq(CIB_OP_REPLACE, op, TRUE)) { config_changed = TRUE; } } if (rc == pcmk_ok && is_not_set(call_options, cib_dryrun)) { crm_trace("Activating %s->%s%s%s", crm_element_value(current_cib, XML_ATTR_NUMUPDATES), crm_element_value(result_cib, XML_ATTR_NUMUPDATES), (is_set(call_options, cib_zero_copy)? " zero-copy" : ""), (config_changed? 
" changed" : "")); if(is_not_set(call_options, cib_zero_copy)) { rc = activateCibXml(result_cib, config_changed, op); crm_trace("Activated %s (%d)", crm_element_value(current_cib, XML_ATTR_NUMUPDATES), rc); } if (rc == pcmk_ok && cib_internal_config_changed(*cib_diff)) { cib_read_config(config_hash, result_cib); } if (crm_str_eq(CIB_OP_REPLACE, op, TRUE)) { if (section == NULL) { send_r_notify = TRUE; } else if (safe_str_eq(section, XML_TAG_CIB)) { send_r_notify = TRUE; } else if (safe_str_eq(section, XML_CIB_TAG_NODES)) { send_r_notify = TRUE; } else if (safe_str_eq(section, XML_CIB_TAG_STATUS)) { send_r_notify = TRUE; } } else if (crm_str_eq(CIB_OP_ERASE, op, TRUE)) { send_r_notify = TRUE; } mainloop_timer_stop(digest_timer); mainloop_timer_start(digest_timer); } else if (rc == -pcmk_err_schema_validation) { CRM_ASSERT(is_not_set(call_options, cib_zero_copy)); if (output != NULL) { crm_log_xml_info(output, "cib:output"); free_xml(output); } output = result_cib; } else { crm_trace("Not activating %d %d %s", rc, is_set(call_options, cib_dryrun), crm_element_value(result_cib, XML_ATTR_NUMUPDATES)); if(is_not_set(call_options, cib_zero_copy)) { free_xml(result_cib); } } if ((call_options & (cib_inhibit_notify|cib_dryrun)) == 0) { const char *client = crm_element_value(request, F_CIB_CLIENTNAME); crm_trace("Sending notifications %d", is_set(call_options, cib_dryrun)); cib_diff_notify(call_options, client, call_id, op, input, rc, *cib_diff); } if (send_r_notify) { const char *origin = crm_element_value(request, F_ORIG); cib_replace_notify(origin, the_cib, rc, *cib_diff); } xml_log_patchset(LOG_TRACE, "cib:diff", *cib_diff); done: if ((call_options & cib_discard_reply) == 0) { const char *caller = crm_element_value(request, F_CIB_CLIENTID); *reply = create_xml_node(NULL, "cib-reply"); crm_xml_add(*reply, F_TYPE, T_CIB); crm_xml_add(*reply, F_CIB_OPERATION, op); crm_xml_add(*reply, F_CIB_CALLID, call_id); crm_xml_add(*reply, F_CIB_CLIENTID, caller); crm_xml_add_int(*reply, F_CIB_CALLOPTS, call_options); crm_xml_add_int(*reply, F_CIB_RC, rc); if (output != NULL) { crm_trace("Attaching reply output"); add_message_xml(*reply, F_CIB_CALLDATA, output); } crm_log_xml_explicit(*reply, "cib:reply"); } crm_trace("cleanup"); if (cib_op_modifies(call_type) == FALSE && output != current_cib) { free_xml(output); output = NULL; } if (call_type >= 0) { cib_op_cleanup(call_type, call_options, &input, &output); } crm_trace("done"); return rc; } void cib_peer_callback(xmlNode * msg, void *private_data) { const char *reason = NULL; const char *originator = crm_element_value(msg, F_ORIG); if (cib_legacy_mode() && (originator == NULL || crm_str_eq(originator, cib_our_uname, TRUE))) { /* message is from ourselves */ int bcast_id = 0; if (!(crm_element_value_int(msg, F_CIB_LOCAL_NOTIFY_ID, &bcast_id))) { check_local_notify(bcast_id); } return; } else if (crm_peer_cache == NULL) { reason = "membership not established"; goto bail; } if (crm_element_value(msg, F_CIB_CLIENTNAME) == NULL) { crm_xml_add(msg, F_CIB_CLIENTNAME, originator); } /* crm_log_xml_trace("Peer[inbound]", msg); */ cib_process_request(msg, FALSE, TRUE, NULL); return; bail: if (reason) { const char *seq = crm_element_value(msg, F_SEQ); const char *op = crm_element_value(msg, F_CIB_OPERATION); crm_warn("Discarding %s message (%s) from %s: %s", op, seq, originator, reason); } } static gboolean cib_force_exit(gpointer data) { crm_notice("Forcing exit!"); terminate_cib(__FUNCTION__, CRM_EX_ERROR); return FALSE; } static void 
disconnect_remote_client(gpointer key, gpointer value, gpointer user_data) { pcmk__client_t *a_client = value; crm_err("Disconnecting %s... Not implemented", crm_str(a_client->name)); } void cib_shutdown(int nsig) { struct qb_ipcs_stats srv_stats; if (cib_shutdown_flag == FALSE) { int disconnects = 0; qb_ipcs_connection_t *c = NULL; cib_shutdown_flag = TRUE; c = qb_ipcs_connection_first_get(ipcs_rw); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_rw, last); crm_debug("Disconnecting r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_ro); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_ro, last); crm_debug("Disconnecting r/o client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_shm); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_shm, last); crm_debug("Disconnecting non-blocking r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } disconnects += pcmk__ipc_client_count(); crm_debug("Disconnecting %d remote clients", pcmk__ipc_client_count()); pcmk__foreach_ipc_client(disconnect_remote_client, NULL); crm_info("Disconnected %d clients", disconnects); } qb_ipcs_stats_get(ipcs_rw, &srv_stats, QB_FALSE); if (pcmk__ipc_client_count() == 0) { crm_info("All clients disconnected (%d)", srv_stats.active_connections); initiate_exit(); } else { crm_info("Waiting on %d clients to disconnect (%d)", pcmk__ipc_client_count(), srv_stats.active_connections); } } void initiate_exit(void) { int active = 0; xmlNode *leaving = NULL; active = crm_active_peers(); if (active < 2) { terminate_cib(__FUNCTION__, 0); return; } crm_info("Sending disconnect notification to %d peers...", active); leaving = create_xml_node(NULL, "exit-notification"); crm_xml_add(leaving, F_TYPE, "cib"); crm_xml_add(leaving, F_CIB_OPERATION, "cib_shutdown_req"); send_cluster_message(NULL, crm_msg_cib, leaving, TRUE); free_xml(leaving); g_timeout_add(EXIT_ESCALATION_MS, cib_force_exit, NULL); } extern int remote_fd; extern int remote_tls_fd; /*! * \internal * \brief Close remote sockets, free the global CIB and quit * * \param[in] caller Name of calling function (for log message) * \param[in] fast If -1, skip disconnect; if positive, exit that */ void terminate_cib(const char *caller, int fast) { crm_info("%s: Exiting%s...", caller, (fast > 0)? " fast" : mainloop ? " from mainloop" : ""); if (remote_fd > 0) { close(remote_fd); remote_fd = 0; } if (remote_tls_fd > 0) { close(remote_tls_fd); remote_tls_fd = 0; } uninitializeCib(); if (fast > 0) { /* Quit fast on error */ pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(fast); } else if ((mainloop != NULL) && g_main_loop_is_running(mainloop)) { /* Quit via returning from the main loop. If fast == -1, we skip the * disconnect here, and it will be done when the main loop returns * (this allows the peer status callback to avoid messing with the * peer caches). */ if (fast == 0) { crm_cluster_disconnect(&crm_cluster); } g_main_loop_quit(mainloop); } else { /* Quit via clean exit. Even the peer status callback can disconnect * here, because we're not returning control to the caller. 
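* (crm_exit(CRM_EX_OK) below terminates the process rather than returning.)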
*/ crm_cluster_disconnect(&crm_cluster); pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(CRM_EX_OK); } } diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index 386eabbc01..b70992f2fa 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -1,2850 +1,2850 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // lrmd_event_data_t, lrmd_rsc_info_t, etc. #include #include #include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *operation, xmlNode *msg); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static int do_update_resource(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, time_t lock_time); static void lrm_connection_destroy(void) { if (is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("Connection to executor failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("Disconnected from executor"); } } static char * make_stop_id(const char *rsc, int call_id) { return crm_strdup_printf("%s:%d", rsc, call_id); } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } /*! * \internal * \brief Remove a recurring operation from a resource's history * * \param[in,out] history Resource history to modify * \param[in] op Operation to remove * * \return TRUE if the operation was found and removed, FALSE otherwise */ static gboolean history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_event_data_t *existing = iter->data; if ((op->interval_ms == existing->interval_ms) && crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) && safe_str_eq(op->op_type, existing->op_type)) { history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); lrmd_free_event(existing); return TRUE; } } return FALSE; } /*! 
* \internal * \brief Free all recurring operations in resource history * * \param[in,out] history Resource history to modify */ static void history_free_recurring_ops(rsc_history_t *history) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_free_event(iter->data); } g_list_free(history->recurring_op_list); history->recurring_op_list = NULL; } /*! * \internal * \brief Free resource history * * \param[in,out] history Resource history to free */ void history_free(gpointer data) { rsc_history_t *history = (rsc_history_t*)data; if (history->stop_params) { g_hash_table_destroy(history->stop_params); } /* Don't need to free history->rsc.id because it's set to history->id */ free(history->rsc.type); free(history->rsc.standard); free(history->rsc.provider); lrmd_free_event(history->failed); lrmd_free_event(history->last); free(history->id); history_free_recurring_ops(history); free(history); } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); controld_delete_resource_history(op->rsc_id, lrm_state->node_name, NULL, crmd_cib_smart_opt()); return; } if (safe_str_eq(op->op_type, RSC_NOTIFY)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = calloc(1, sizeof(rsc_history_t)); entry->id = strdup(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = strdup(rsc->type); entry->rsc.standard = strdup(rsc->standard); if (rsc->provider) { entry->rsc.provider = strdup(rsc->provider); } else { entry->rsc.provider = NULL; } } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_LRM_OP_CANCELLED) { if (op->interval_ms > 0) { crm_trace("Removing cancelled recurring op: " PCMK__OP_FMT, op->rsc_id, op->op_type, op->interval_ms); history_remove_recurring_op(entry, op); return; } else { crm_trace("Skipping " PCMK__OP_FMT " rc=%d, status=%d", op->rsc_id, op->op_type, op->interval_ms, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* Store failed monitors here, otherwise the block below will cause them * to be forgotten when a stop happens. 
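* (entry->failed is stored separately from entry->last, which the interval_ms == 0 branch below replaces whenever a non-recurring operation completes.)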
*/ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval_ms == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && (safe_str_eq(CRMD_ACTION_START, op->op_type) || safe_str_eq("reload", op->op_type) || safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = crm_str_table_new(); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval_ms > 0) { /* Ensure there are no duplicates */ history_remove_recurring_op(entry, op); crm_trace("Adding recurring op: " PCMK__OP_FMT, op->rsc_id, op->op_type, op->interval_ms); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT, g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval_ms); history_free_recurring_ops(entry); } } /*! * \internal * \brief Send a direct OK ack for a resource task * * \param[in] lrm_state LRM connection * \param[in] input Input message being ack'ed * \param[in] rsc_id ID of affected resource * \param[in] rsc Affected resource (if available) * \param[in] task Operation task being ack'ed * \param[in] ack_host Name of host to send ack to * \param[in] ack_sys IPC system name to ack */ static void send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input, const char *rsc_id, lrmd_rsc_info_t *rsc, const char *task, const char *ack_host, const char *ack_sys) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task); op->rc = PCMK_OCF_OK; op->op_status = PCMK_LRM_OP_DONE; controld_ack_event_directly(ack_host, ack_sys, rsc, op, rsc_id); lrmd_free_event(op); } static inline const char * op_node_name(lrmd_event_data_t *op) { return op->remote_nodename? op->remote_nodename : fsa_our_uname; } void lrm_op_callback(lrmd_event_data_t * op) { CRM_CHECK(op != NULL, return); switch (op->type) { case lrmd_event_disconnect: if (op->remote_nodename == NULL) { /* If this is the local executor IPC connection, set the right * bits in the controller when the connection goes down. */ lrm_connection_destroy(); } break; case lrmd_event_exec_complete: { lrm_state_t *lrm_state = lrm_state_find(op_node_name(op)); CRM_ASSERT(lrm_state != NULL); process_lrm_event(lrm_state, op, NULL, NULL); } break; default: break; } } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local executor connections. Remote connections are * handled as resources within the scheduler. Connecting and disconnecting * from remote executor instances is handled differently. 
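* (Those remote executor connections are the ones identified with is_remote_lrmd_ra() elsewhere in this file.)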
*/ lrm_state_t *lrm_state = NULL; if(fsa_our_uname == NULL) { return; /* Nothing to do */ } lrm_state = lrm_state_find_or_create(fsa_our_uname); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } clear_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Disconnecting from the executor"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); crm_notice("Disconnected from the executor"); } if (action & A_LRM_CONNECT) { int ret = pcmk_ok; crm_debug("Connecting to the executor"); ret = lrm_state_ipc_connect(lrm_state); if (ret != pcmk_ok) { if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to connect to the executor %d time%s (%d max)", lrm_state->num_lrm_register_fails, pcmk__plural_s(lrm_state->num_lrm_register_fails), MAX_LRM_REG_FAILS); controld_start_timer(wait_timer); crmd_fsa_stall(FALSE); return; } } if (ret != pcmk_ok) { crm_err("Failed to connect to the executor the max allowed %d time%s", lrm_state->num_lrm_register_fails, pcmk__plural_s(lrm_state->num_lrm_register_fails)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } set_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Connection to the executor established"); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; active_op_t *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (is_set(fsa_input_register, R_SHUTDOWN)) { when = "shutdown... 
waiting"; } if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) { guint removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_actions, lrm_state); guint nremaining = g_hash_table_size(lrm_state->pending_ops); if (removed || nremaining) { crm_notice("Stopped %u recurring operation%s at %s (%u remaining)", removed, pcmk__plural_s(removed), when, nremaining); } } if (lrm_state->pending_ops) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval_ms == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending executor operation%s at %s", counter, pcmk__plural_s(counter), when); if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (is_set(fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; if (log_level == LOG_ERR) { crm_info("Found %s active at %s", entry->id, when); } else { crm_trace("Found %s active at %s", entry->id, when); } if (lrm_state->pending_ops) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (crm_str_eq(entry->id, pending->rsc_id, TRUE)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval_ms == 0 ? "A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resource%s active at %s", counter, (counter == 1)? " was" : "s were", when); } return rc; } static char * build_parameter_list(const lrmd_event_data_t *op, const struct ra_metadata_s *metadata, xmlNode *result, enum ra_param_flags_e param_type, bool invert_for_xml) { int len = 0; int max = 0; char *list = NULL; GList *iter = NULL; /* Newer resource agents support the "private" parameter attribute to * indicate sensitive parameters. For backward compatibility with older * agents, this list is used if the agent doesn't specify any as "private". 
*/ const char *secure_terms[] = { "password", "passwd", "user", }; if (is_not_set(metadata->ra_flags, ra_uses_private) && (param_type == ra_param_private)) { max = DIMOF(secure_terms); } for (iter = metadata->ra_params; iter != NULL; iter = iter->next) { struct ra_param_s *param = (struct ra_param_s *) iter->data; bool accept = FALSE; if (is_set(param->rap_flags, param_type)) { accept = TRUE; } else if (max) { for (int lpc = 0; lpc < max; lpc++) { if (safe_str_eq(secure_terms[lpc], param->rap_name)) { accept = TRUE; break; } } } if (accept) { int start = len; crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type)); len += strlen(param->rap_name) + 2; // include spaces around list = realloc_safe(list, len + 1); // include null terminator // spaces before and after make parsing simpler sprintf(list + start, " %s ", param->rap_name); } else { crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type)); } if (result && (invert_for_xml? !accept : accept)) { const char *v = g_hash_table_lookup(op->params, param->rap_name); if (v != NULL) { crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v); crm_xml_add(result, param->rap_name, v); } } } return list; } static void append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *restart = NULL; CRM_LOG_ASSERT(op->params != NULL); if (op->interval_ms > 0) { /* monitors are not reloadable */ return; } if (is_set(metadata->ra_flags, ra_supports_reload)) { restart = create_xml_node(NULL, XML_TAG_PARAMS); /* Add any parameters with unique="1" to the "op-force-restart" list. * * (Currently, we abuse "unique=0" to indicate reloadability. This is * nonstandard and should eventually be replaced once the OCF standard * is updated with something better.) */ list = build_parameter_list(op, metadata, restart, ra_param_unique, FALSE); } else { /* Resource does not support reloads */ return; } digest = calculate_operation_digest(restart, version); /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload, * no matter if it actually supports any parameters with unique="1"). */ crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? 
list: ""); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(restart, "restart digest source"); free_xml(restart); free(digest); free(list); } static void append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *secure = NULL; CRM_LOG_ASSERT(op->params != NULL); /* * To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the * secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on * the insecure ones */ secure = create_xml_node(NULL, XML_TAG_PARAMS); list = build_parameter_list(op, metadata, secure, ra_param_private, TRUE); if (list != NULL) { digest = calculate_operation_digest(secure, version); crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, list); crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(secure, "secure digest source"); } else { crm_trace("%s: no secure parameters", op->rsc_id); } free_xml(secure); free(digest); free(list); } static gboolean build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *node_name, const char *src) { int target_rc = 0; xmlNode *xml_op = NULL; struct ra_metadata_s *metadata = NULL; const char *caller_version = NULL; lrm_state_t *lrm_state = NULL; if (op == NULL) { return FALSE; } target_rc = rsc_op_expected_rc(op); /* there is a small risk in formerly mixed clusters that it will * be sub-optimal. * * however with our upgrade policy, the update we send should * still be completely supported anyway */ caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); CRM_LOG_ASSERT(caller_version != NULL); if(caller_version == NULL) { caller_version = CRM_FEATURE_SET; } crm_trace("Building %s operation update with originator version: %s", op->rsc_id, caller_version); xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc, fsa_our_uname, src, LOG_DEBUG); if (xml_op == NULL) { return TRUE; } if ((rsc == NULL) || (op->params == NULL) || !crm_op_needs_metadata(rsc->standard, op->op_type)) { crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)", op->op_type, op->rsc_id, op->params, rsc); return TRUE; } lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { crm_warn("Cannot calculate digests for operation " PCMK__OP_FMT " because we have no connection to executor for %s", op->rsc_id, op->op_type, op->interval_ms, node_name); return TRUE; } metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata == NULL) { /* For now, we always collect resource agent meta-data via a local, * synchronous, direct execution of the agent. This has multiple issues: * the executor should execute agents, not the controller; meta-data for * Pacemaker Remote nodes should be collected on those nodes, not * locally; and the meta-data call shouldn't eat into the timeout of the * real action being performed. * * These issues are planned to be addressed by having the scheduler * schedule a meta-data cache check at the beginning of each transition. * Once that is working, this block will only be a fallback in case the * initial collection fails. 
*/ char *metadata_str = NULL; int rc = lrm_state_get_metadata(lrm_state, rsc->standard, rsc->provider, rsc->type, &metadata_str, 0); if (rc != pcmk_ok) { crm_warn("Failed to get metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } metadata = metadata_cache_update(lrm_state->metadata_cache, rsc, metadata_str); free(metadata_str); if (metadata == NULL) { crm_warn("Failed to update metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } } #if ENABLE_VERSIONED_ATTRS crm_xml_add(xml_op, XML_ATTR_RA_VERSION, metadata->ra_version); #endif crm_trace("Including additional digests for %s::%s:%s", rsc->standard, rsc->provider, rsc->type); append_restart_list(op, metadata, xml_op, caller_version); append_secure_list(op, metadata, xml_op, caller_version); return TRUE; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval_ms, entry->last->rc); if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) { // A stricter check is too complex ... leave that to the scheduler return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if ((entry->last->interval_ms == 0) && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container); } } build_operation_update(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name, __FUNCTION__); build_operation_update(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name, __FUNCTION__); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { build_operation_update(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name, __FUNCTION__); } } return FALSE; } static xmlNode * do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags) { xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; crm_node_t *peer = NULL; peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, update_flags, NULL, __FUNCTION__); if (xml_state == NULL) { return NULL; } xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid); rsc_list = 
create_xml_node(xml_data, XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current executor state"); return xml_state; } xmlNode * controld_query_executor_state(const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (!lrm_state) { crm_err("Could not find executor state for node %s", node_name); return NULL; } return do_lrm_query_internal(lrm_state, node_update_cluster|node_update_peer); } /*! * \internal * \brief Map standard Pacemaker return code to operation status and OCF code * * \param[out] event Executor event whose status and return code should be set * \param[in] rc Standard Pacemaker return code */ void controld_rc2event(lrmd_event_data_t *event, int rc) { switch (rc) { case pcmk_rc_ok: event->rc = PCMK_OCF_OK; event->op_status = PCMK_LRM_OP_DONE; break; case EACCES: event->rc = PCMK_OCF_INSUFFICIENT_PRIV; event->op_status = PCMK_LRM_OP_ERROR; break; default: event->rc = PCMK_OCF_UNKNOWN_ERROR; event->op_status = PCMK_LRM_OP_ERROR; break; } } /*! * \internal * \brief Trigger a new transition after CIB status was deleted * * If a CIB status delete was not expected (as part of the transition graph), * trigger a new transition by updating the (arbitrary) "last-lrm-refresh" * cluster property. * * \param[in] from_sys IPC name that requested the delete * \param[in] rsc_id Resource whose status was deleted (for logging only) */ void controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id) { if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { char *now_s = crm_strdup_printf("%lld", (long long) time(NULL)); crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id); update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE, NULL, NULL); free(now_s); } } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? 
"" : " not")); op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); controld_rc2event(op, pcmk_legacy2rc(rc)); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); controld_trigger_delete_refresh(from_sys, rsc_id); } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (crm_str_eq(event->rsc, op->rsc, TRUE)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; active_op_t *pending = value; if (crm_str_eq(rsc, pending->rsc_id, TRUE)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } static void delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, GHashTableIter * rsc_gIter, int rc, const char *user_name) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = strdup(rsc_id); if (rsc_gIter) { g_hash_table_iter_remove(rsc_gIter); } else { g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); } controld_delete_resource_history(rsc_id_copy, lrm_state->node_name, user_name, crmd_cib_smart_opt()); g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } /*! * \internal * \brief Erase an LRM history entry from the CIB, given the operation data * * \param[in] lrm_state LRM state of the desired node * \param[in] op Operation whose history should be deleted */ static void erase_lrm_history_by_op(lrm_state_t *lrm_state, lrmd_event_data_t *op) { xmlNode *xml_top = NULL; CRM_CHECK(op != NULL, return); xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); if (op->interval_ms > 0) { char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ crm_xml_add(xml_top, XML_ATTR_ID, op_id); free(op_id); } crm_debug("Erasing resource operation history for " PCMK__OP_FMT " (call=%d)", op->rsc_id, op->op_type, op->interval_ms, op->call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } /* Define xpath to find LRM resource history entry by node and resource */ #define XPATH_HISTORY \ "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \ "/" XML_LRM_TAG_RSC_OP /* ... and also by operation key */ #define XPATH_HISTORY_ID XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s']" /* ... and also by operation key and operation call ID */ #define XPATH_HISTORY_CALL XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']" /* ... and also by operation key and original operation key */ #define XPATH_HISTORY_ORIG XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']" /*! 
* \internal * \brief Erase an LRM history entry from the CIB, given operation identifiers * * \param[in] lrm_state LRM state of the node to clear history for * \param[in] rsc_id Name of resource to clear history for * \param[in] key Operation key of operation to clear history for * \param[in] orig_op If specified, delete only if it has this original op * \param[in] call_id If specified, delete entry only if it has this call ID */ static void erase_lrm_history_by_id(lrm_state_t *lrm_state, const char *rsc_id, const char *key, const char *orig_op, int call_id) { char *op_xpath = NULL; CRM_CHECK((rsc_id != NULL) && (key != NULL), return); if (call_id > 0) { op_xpath = crm_strdup_printf(XPATH_HISTORY_CALL, lrm_state->node_name, rsc_id, key, call_id); } else if (orig_op) { op_xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, lrm_state->node_name, rsc_id, key, orig_op); } else { op_xpath = crm_strdup_printf(XPATH_HISTORY_ID, lrm_state->node_name, rsc_id, key); } crm_debug("Erasing resource operation history for %s on %s (call=%d)", key, rsc_id, call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath); free(op_xpath); } static inline gboolean last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms) { if (entry == NULL) { return FALSE; } if (op == NULL) { return TRUE; } return (safe_str_eq(op, entry->failed->op_type) && (interval_ms == entry->failed->interval_ms)); } /*! * \internal * \brief Clear a resource's last failure * * Erase a resource's last failure on a particular node from both the * LRM resource history in the CIB, and the resource history remembered * for the LRM state. * * \param[in] rsc_id Resource name * \param[in] node_name Node name * \param[in] operation If specified, only clear if matching this operation * \param[in] interval_ms If operation is specified, it has this interval */ void lrm_clear_last_failure(const char *rsc_id, const char *node_name, const char *operation, guint interval_ms) { char *op_key = NULL; char *orig_op_key = NULL; lrm_state_t *lrm_state = NULL; lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { return; } /* Erase from CIB */ op_key = pcmk__op_key(rsc_id, "last_failure", 0); if (operation) { orig_op_key = pcmk__op_key(rsc_id, operation, interval_ms); } erase_lrm_history_by_id(lrm_state, rsc_id, op_key, orig_op_key, 0); free(op_key); free(orig_op_key); /* Remove from memory */ if (lrm_state->resource_history) { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (last_failed_matches_op(entry, operation, interval_ms)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; active_op_t *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->pending_ops, key); if (pending) { if (remove && is_not_set(pending->flags, active_op_remove)) { set_bit(pending->flags, active_op_remove); crm_debug("Scheduling %s for removal", key); } if (is_set(pending->flags, active_op_cancelled)) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } set_bit(pending->flags, active_op_cancelled); } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } 
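/* The operation is known and newly marked as cancelled; ask the executor to cancel it */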
crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; active_op_t *op = value; if (crm_str_eq(op->op_key, data->key, TRUE)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->pending_ops)); return data.done; } /*! * \internal * \brief Retrieve resource information from LRM * * \param[in] lrm_state LRM connection to use * \param[in] rsc_xml XML containing resource configuration * \param[in] do_create If true, register resource with LRM if not already * \param[out] rsc_info Where to store resource information obtained from LRM * * \retval pcmk_ok Success (and rsc_info holds newly allocated result) * \retval -EINVAL Required information is missing from arguments * \retval -ENOTCONN No active connection to LRM * \retval -ENODEV Resource not found * \retval -errno Error communicating with executor when registering resource * * \note Caller is responsible for freeing result on success. 
*/ static int get_lrm_resource(lrm_state_t *lrm_state, xmlNode *rsc_xml, gboolean do_create, lrmd_rsc_info_t **rsc_info) { const char *id = ID(rsc_xml); CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); CRM_CHECK(id, return -EINVAL); if (lrm_state_is_connected(lrm_state) == FALSE) { return -ENOTCONN; } crm_trace("Retrieving resource information for %s from the executor", id); *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); // If resource isn't known by ID, try clone name, if provided if (!*rsc_info) { const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG); if (long_id) { *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); } } if ((*rsc_info == NULL) && do_create) { const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER); const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE); int rc; crm_trace("Registering resource %s with the executor", id); rc = lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Could not register resource %s with the executor on %s: %s " CRM_XS " rc=%d", id, lrm_state->node_name, pcmk_strerror(rc), rc); /* Register this as an internal error if this involves the local * executor. Otherwise, we're likely dealing with an unresponsive * remote node, which is not an FSA failure. */ if (lrm_state_is_local(lrm_state) == TRUE) { register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } return rc; } *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); } return *rsc_info? pcmk_ok : -ENODEV; } static void delete_resource(lrm_state_t * lrm_state, const char *id, lrmd_rsc_info_t * rsc, GHashTableIter * gIter, const char *sys, const char *user, ha_msg_input_t * request, gboolean unregister) { int rc = pcmk_ok; crm_info("Removing resource %s from executor for %s%s%s", id, sys, (user? " as " : ""), (user? user : "")); if (rsc && unregister) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource %s deleted from executor", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' from executor is pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); op = calloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = strdup(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Could not delete '%s' from executor for %s%s%s: %s " CRM_XS " rc=%d", id, sys, (user? " as " : ""), (user? 
user : ""), pcmk_strerror(rc), rc); } delete_rsc_entry(lrm_state, request, id, gIter, rc, user); } static int get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id) { int call_id = 999999999; rsc_history_t *entry = NULL; if(lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { call_id = entry->last_callid + 1; } if (call_id < 0) { call_id = 1; } return call_id; } static void fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status, enum ocf_exitcode op_exitcode) { op->call_id = get_fake_call_id(lrm_state, op->rsc_id); op->t_run = time(NULL); op->t_rcchange = op->t_run; op->op_status = op_status; op->rc = op_exitcode; } static void force_reprobe(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_info("Clearing resource history on node %s", lrm_state->node_name); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { /* only unregister the resource during a reprobe if it is not a remote connection * resource. otherwise unregistering the connection will terminate remote-node * membership */ gboolean unregister = TRUE; if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { lrm_state_t *remote_lrm_state = lrm_state_find(entry->id); if (remote_lrm_state) { /* when forcing a reprobe, make sure to clear remote node before * clearing the remote node's connection resource */ force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE); } unregister = FALSE; } delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, user_name, NULL, unregister); } /* Now delete the copy in the CIB */ controld_delete_node_state(lrm_state->node_name, controld_section_lrm, cib_scope_local); /* Finally, _delete_ the value in pacemaker-attrd -- setting it to FALSE * would result in the scheduler sending us back here again */ update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); } /*! * \internal * \brief Fail a requested action without actually executing it * * For an action that can't be executed, process it similarly to an actual * execution result, with specified error status (except for notify actions, * which will always be treated as successful). * * \param[in] lrm_state Executor connection that action is for * \param[in] action Action XML from request * \param[in] rc Desired return code to use * \param[in] op_status Desired operation status to use */ static void synthesize_lrmd_failure(lrm_state_t *lrm_state, xmlNode *action, int op_status, enum ocf_exitcode rc) { lrmd_event_data_t *op = NULL; const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK); const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET); xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE); if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
*/ crm_info("Can't fake %s failure (%d) on %s without resource configuration", crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ crm_info("Can't fake %s failure (%d) on %s without operation", ID(xml_rsc), rc, target_node); return; } op = construct_op(lrm_state, action, ID(xml_rsc), operation); if (safe_str_eq(operation, RSC_NOTIFY)) { // Notifications can't fail fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_OK); } else { fake_op_status(lrm_state, op, op_status, rc); } crm_info("Faking " PCMK__OP_FMT " result (%d) on %s", op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node); // Process the result as if it came from the LRM process_lrm_event(lrm_state, op, NULL, action); lrmd_free_event(op); } /*! * \internal * \brief Get target of an LRM operation * * \param[in] xml LRM operation data XML * * \return LRM operation target node name (local node or Pacemaker Remote node) */ static const char * lrm_op_target(xmlNode *xml) { const char *target = NULL; if (xml) { target = crm_element_value(xml, XML_LRM_ATTR_TARGET); } if (target == NULL) { target = fsa_our_uname; } return target; } static void fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* The executor simply executes operations and reports the results, without * any concept of success or failure, so to fail a resource, we must fake * what a failure looks like. * * To do this, we create a fake executor operation event for the resource, * and pass that event to the executor client callback so it will be * processed as if it came from the executor. 
*/ op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon"); fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); free((char*) op->user_data); op->user_data = NULL; op->interval_ms = 0; #if ENABLE_ACL if (user_name && !pcmk__is_privileged(user_name)) { crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } #endif if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) { crm_info("Failing resource %s...", rsc->id); op->exit_reason = strdup("Simulated failure"); process_lrm_event(lrm_state, op, NULL, xml); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(xml, "bad input"); } controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); } static void handle_refresh_op(lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { int rc = pcmk_ok; xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all); fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); crm_info("Forced a local resource history refresh: call=%d", rc); if (safe_str_neq(CRM_SYSTEM_CRMD, from_sys)) { xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, fragment, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } free_xml(fragment); } static void handle_query_op(xmlNode *msg, lrm_state_t *lrm_state) { xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all); xmlNode *reply = create_reply(msg, data); if (relay_message(reply, TRUE) == FALSE) { crm_err("Unable to route reply"); crm_log_xml_err(reply, "reply"); } free_xml(reply); free_xml(data); } static void handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) { crm_notice("Forcing the status of all resources to be redetected"); force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node); if (safe_str_neq(CRM_SYSTEM_PENGINE, from_sys) && safe_str_neq(CRM_SYSTEM_TENGINE, from_sys)) { xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } } static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys) { char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; guint interval_ms = 0; gboolean in_progress = FALSE; xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); CRM_CHECK(params != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_TASK); op_task = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); if (crm_element_value_ms(params, meta_key, &interval_ms) != pcmk_ok) { free(meta_key); return FALSE; } free(meta_key); op_key = pcmk__op_key(rsc->id, op_task, interval_ms); meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); call_id = crm_element_value(params, 
meta_key); free(meta_key); crm_debug("Scheduler requested op %s (call=%s) be cancelled", op_key, (call_id? call_id : "NA")); call = crm_parse_int(call_id, "0"); if (call == 0) { // Normal case when the scheduler cancels a recurring op in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { // Normal case when the scheduler cancels an orphan op in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } // Acknowledge cancellation operation if for a remote connection resource if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { char *op_id = make_stop_id(rsc->id, call); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { crm_info("Nothing known about operation %d for %s", call, op_key); } erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); /* needed at least for cancellation of a remote operation */ g_hash_table_remove(lrm_state->pending_ops, op_id); free(op_id); } else { /* No ack is needed since abcdaa8, but peers with older versions * in a rolling upgrade need one. We didn't bump the feature set * at that commit, so we can only compare against the previous * CRM version (3.0.8). If any peers have feature set 3.0.9 but * not abcdaa8, they will time out waiting for the ack (no * released versions of Pacemaker are affected). */ const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION); if (compare_version(peer_version, "3.0.8") <= 0) { crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)", op_key, from_host, peer_version); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); } } free(op_key); return TRUE; } static void do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host, bool crm_rsc_delete, const char *user_name) { gboolean unregister = TRUE; #if ENABLE_ACL int cib_rc = controld_delete_resource_history(rsc->id, lrm_state->node_name, user_name, cib_dryrun|cib_sync_call); if (cib_rc != pcmk_rc_ok) { lrmd_event_data_t *op = NULL; op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE); op->op_status = PCMK_LRM_OP_ERROR; if (cib_rc == EACCES) { op->rc = PCMK_OCF_INSUFFICIENT_PRIV; } else { op->rc = PCMK_OCF_UNKNOWN_ERROR; } controld_ack_event_directly(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } #endif if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { unregister = FALSE; } delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, user_name, input, unregister); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = NULL; gboolean is_remote_node = FALSE; bool crm_rsc_delete = FALSE; target_node = lrm_op_target(input->xml); is_remote_node = safe_str_neq(target_node, fsa_our_uname); lrm_state = lrm_state_find(target_node); if ((lrm_state == NULL) && is_remote_node) { crm_err("Failing action because local node has never had connection to remote node %s", target_node); synthesize_lrmd_failure(NULL, input->xml, PCMK_LRM_OP_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR); return; } CRM_ASSERT(lrm_state != NULL); #if 
ENABLE_ACL user_name = pcmk__update_acl_user(input->msg, F_CRM_USER, NULL); #endif crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } #if ENABLE_ACL crm_trace("Executor %s command from %s as user %s", crm_op, from_sys, user_name); #else crm_trace("Executor %s command from %s", crm_op, from_sys); #endif if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { crm_rsc_delete = TRUE; // from crm_resource } operation = CRMD_ACTION_DELETE; } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { fail_lrm_resource(input->xml, lrm_state, user_name, from_host, from_sys); return; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { handle_refresh_op(lrm_state, user_name, from_host, from_sys); } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { handle_query_op(input->msg, lrm_state); } else if (safe_str_eq(operation, CRM_OP_PROBED)) { update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node); } else if (safe_str_eq(operation, CRM_OP_REPROBE) || safe_str_eq(crm_op, CRM_OP_REPROBE)) { handle_reprobe_op(lrm_state, from_sys, from_host, user_name, is_remote_node); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); gboolean create_rsc = safe_str_neq(operation, CRMD_ACTION_DELETE); int rc; // We can't return anything meaningful without a resource ID CRM_CHECK(xml_rsc && ID(xml_rsc), return); rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); if (rc == -ENOTCONN) { synthesize_lrmd_failure(lrm_state, input->xml, PCMK_LRM_OP_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR); return; } else if ((rc < 0) && !create_rsc) { /* Delete of malformed or nonexistent resource * (deleting something that does not exist is a success) */ crm_notice("Not registering resource '%s' for a %s event " CRM_XS " get-rc=%d (%s) transition-key=%s", ID(xml_rsc), operation, rc, pcmk_strerror(rc), ID(input->xml)); delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name); return; } else if (rc == -EINVAL) { // Resource operation on malformed resource crm_err("Invalid resource definition for %s", ID(xml_rsc)); crm_log_xml_warn(input->msg, "invalid resource"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_LRM_OP_ERROR, PCMK_OCF_NOT_CONFIGURED); // fatal error return; } else if (rc < 0) { // Error communicating with the executor crm_err("Could not register resource '%s' with executor: %s " CRM_XS " rc=%d", ID(xml_rsc), pcmk_strerror(rc), rc); crm_log_xml_warn(input->msg, "failed registration"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_LRM_OP_ERROR, PCMK_OCF_INVALID_PARAM); // hard error return; } if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { crm_log_xml_warn(input->xml, "Bad command"); } } else if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, crm_rsc_delete, user_name); } else { do_lrm_rsc_op(lrm_state, rsc, operation, input->xml); } lrmd_free_rsc_info(rsc); } else { crm_err("Cannot perform operation %s of unknown type", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(lrm_state_t * lrm_state, xmlNode * 
rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; GHashTable *params = NULL; const char *transition = NULL; CRM_ASSERT(rsc_id && operation); op = lrmd_new_event(rsc_id, operation, 0); op->type = lrmd_event_exec_complete; op->op_status = PCMK_LRM_OP_PENDING; op->rc = -1; op->timeout = 0; op->start_delay = 0; if (rsc_op == NULL) { CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. */ op->params = crm_str_table_new(); g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_op_target_rc"); op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); op->start_delay = crm_parse_int(op_delay, "0"); op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); op->timeout = crm_parse_int(op_timeout, "0"); if (pcmk__guint_from_hash(params, CRM_META "_" XML_LRM_ATTR_INTERVAL_MS, 0, &(op->interval_ms)) != pcmk_rc_ok) { op->interval_ms = 0; } #if ENABLE_VERSIONED_ATTRS // Resolve any versioned parameters if (lrm_state && safe_str_neq(op->op_type, RSC_METADATA) && safe_str_neq(op->op_type, CRMD_ACTION_DELETE) && !is_remote_lrmd_ra(NULL, NULL, rsc_id)) { // Resource info *should* already be cached, so we don't get executor call lrmd_rsc_info_t *rsc = lrm_state_get_rsc_info(lrm_state, rsc_id, 0); struct ra_metadata_s *metadata; metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata) { xmlNode *versioned_attrs = NULL; GHashTable *hash = NULL; char *key = NULL; char *value = NULL; GHashTableIter iter; versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_META); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_replace(params, crm_meta_name(key), strdup(value)); if (safe_str_eq(key, XML_ATTR_TIMEOUT)) { op->timeout = crm_parse_int(value, "0"); } else if (safe_str_eq(key, XML_OP_ATTR_START_DELAY)) { op->start_delay = crm_parse_int(value, "0"); } } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_RSC_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); } lrmd_free_rsc_info(rsc); } #endif if (safe_str_neq(operation, RSC_STOP)) { op->params = params; } else { rsc_history_t *entry = NULL; if (lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * 
with the old attributes, not the new ones */ op->params = crm_str_table_new(); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->timeout <= 0) { op->timeout = op->interval_ms; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = strdup(transition); if (op->interval_ms != 0) { if (safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and Stop actions cannot have an interval: %u", op->interval_ms); op->interval_ms = 0; } } crm_trace("Constructed %s op for %s: interval=%u", operation, rsc_id, op->interval_ms); return op; } /*! * \internal * \brief Send a (synthesized) event result * * Reply with a synthesized event result directly, as opposed to going through * the executor. * * \param[in] to_host Host to send result to * \param[in] to_sys IPC name to send result to (NULL for transition engine) * \param[in] rsc Type information about resource the result is for * \param[in] op Event with result to send * \param[in] rsc_id ID of resource the result is for */ void controld_ack_event_directly(const char *to_host, const char *to_sys, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; crm_node_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { CRM_ASSERT(rsc_id != NULL); op->rsc_id = strdup(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = crm_get_peer(0, fsa_our_uname); update = create_node_state_update(peer, node_update_none, NULL, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, fsa_our_uname, __FUNCTION__); reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_trace(update, "[direct ACK]"); crm_debug("ACK'ing resource op " PCMK__OP_FMT " from %s: %s", op->rsc_id, op->op_type, op->interval_ms, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(update); free_xml(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } set_bit(fsa_input_register, R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; active_op_t *op = value; if ((op->interval_ms != 0) && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key); remove = 
!cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; active_op_t *op = value; if (op->interval_ms != 0) { crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (const char *) key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } static void record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op) { const char *record_pending = NULL; CRM_CHECK(node_name != NULL, return); CRM_CHECK(rsc != NULL, return); CRM_CHECK(op != NULL, return); // Never record certain operation types as pending if ((op->op_type == NULL) || (op->params == NULL) || !controld_action_is_recordable(op->op_type)) { return; } // defaults to true record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING); if (record_pending && !crm_is_true(record_pending)) { return; } op->call_id = -1; op->op_status = PCMK_LRM_OP_PENDING; op->rc = PCMK_OCF_UNKNOWN; op->t_run = time(NULL); op->t_rcchange = op->t_run; /* write a "pending" entry to the CIB, inhibit notification */ crm_debug("Recording pending op " PCMK__OP_FMT " on %s in the CIB", op->rsc_id, op->op_type, op->interval_ms, node_name); do_update_resource(node_name, rsc, op, 0); } static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *operation, xmlNode *msg) { int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; lrmd_key_value_t *params = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; gboolean stop_recurring = FALSE; bool send_nack = FALSE; CRM_CHECK(rsc != NULL, return); CRM_CHECK(operation != NULL, return); if (msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); if (transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) && (op->interval_ms == 0) && strcmp(operation, CRMD_ACTION_MIGRATE) == 0) { /* pcmk remote connections are a special use case. * We never ever want to stop monitoring a connection resource until * the entire migration has completed. 
If the connection is unexpectedly * severed, even during a migration, this is an event we must detect.*/ stop_recurring = FALSE; } else if ((op->interval_ms == 0) && strcmp(operation, CRMD_ACTION_STATUS) != 0 && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) { /* stop any previous monitor operations before changing the resource state */ stop_recurring = TRUE; } if (stop_recurring == TRUE) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_action_by_rsc, &data); if (removed) { crm_debug("Stopped %u recurring operation%s in preparation for " PCMK__OP_FMT, removed, pcmk__plural_s(removed), rsc->id, operation, op->interval_ms); } } /* now do the op */ crm_info("Performing key=%s op=" PCMK__OP_FMT, transition, rsc->id, operation, op->interval_ms); if (is_set(fsa_input_register, R_SHUTDOWN) && safe_str_eq(operation, RSC_START)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); send_nack = TRUE; } else if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE /* Recalculating */ && fsa_state != S_TRANSITION_ENGINE && safe_str_neq(operation, CRMD_ACTION_STOP)) { send_nack = TRUE; } if(send_nack) { crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)", operation, rsc->id, fsa_state2string(fsa_state), is_set(fsa_input_register, R_SHUTDOWN)?"true":"false"); op->rc = PCMK_OCF_UNKNOWN_ERROR; op->op_status = PCMK_LRM_OP_INVALID; controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } record_pending_op(lrm_state->node_name, rsc, op); op_id = pcmk__op_key(rsc->id, op->op_type, op->interval_ms); if (op->interval_ms > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } if (op->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { params = lrmd_key_value_add(params, key, value); } } call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data, op->interval_ms, op->timeout, op->start_delay, params); if (call_id <= 0 && lrm_state_is_local(lrm_state)) { crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else if (call_id <= 0) { crm_err("Operation %s on resource %s failed to execute on remote node %s: %d", operation, rsc->id, lrm_state->node_name, call_id); fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); process_lrm_event(lrm_state, op, NULL, NULL); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); active_op_t *pending = NULL; pending = calloc(1, sizeof(active_op_t)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval_ms = op->interval_ms; pending->op_type = strdup(operation); pending->op_key = strdup(op_id); pending->rsc_id = strdup(rsc->id); pending->start_time = time(NULL); pending->user_data = op->user_data? 
strdup(op->user_data) : NULL; if (crm_element_value_epoch(msg, XML_CONFIG_ATTR_SHUTDOWN_LOCK, &(pending->lock_time)) != pcmk_ok) { pending->lock_time = 0; } g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); if ((op->interval_ms > 0) && (op->start_delay > START_DELAY_THRESHOLD)) { int target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, NULL, NULL, NULL, &target_rc); op->rc = target_rc; op->op_status = PCMK_LRM_OP_DONE; controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); } pending->params = op->params; op->params = NULL; } free(op_id); lrmd_free_event(op); return; } int last_resource_update = 0; static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); } if (call_id == last_resource_update) { last_resource_update = 0; trigger_fsa(fsa_source); } } /* Only successful stops, and probes that found the resource inactive, get locks * recorded in the history. This ensures the resource stays locked to the node * until it is active there again after the node comes back up. */ static bool should_preserve_lock(lrmd_event_data_t *op) { if (!controld_shutdown_lock_enabled) { return false; } if (!strcmp(op->op_type, RSC_STOP) && (op->rc == PCMK_OCF_OK)) { return true; } if (!strcmp(op->op_type, RSC_STATUS) && (op->rc == PCMK_OCF_NOT_RUNNING)) { return true; } return false; } static int do_update_resource(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, time_t lock_time) { /* */ int rc = pcmk_ok; xmlNode *update, *iter = NULL; int call_opt = crmd_cib_smart_opt(); const char *uuid = NULL; CRM_CHECK(op != NULL, return 0); iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); if (safe_str_eq(node_name, fsa_our_uname)) { uuid = fsa_our_uuid; } else { /* remote nodes uuid and uname are equal */ uuid = node_name; crm_xml_add(iter, XML_NODE_IS_REMOTE, "true"); } CRM_LOG_ASSERT(uuid != NULL); if(uuid == NULL) { rc = -EINVAL; goto done; } crm_xml_add(iter, XML_ATTR_UUID, uuid); crm_xml_add(iter, XML_ATTR_UNAME, node_name); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, node_name, __FUNCTION__); if (rsc) { const char *container = NULL; crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard); crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); if (lock_time != 0) { /* Actions on a locked resource should either preserve the lock by * recording it with the action result, or clear it. 
*/ if (!should_preserve_lock(op)) { lock_time = 0; } crm_xml_add_ll(iter, XML_CONFIG_ATTR_SHUTDOWN_LOCK, (long long) lock_time); } if (op->params) { container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); } if (container) { crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container); crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container); } } else { crm_warn("Resource %s no longer exists in the executor", op->rsc_id); controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id); goto cleanup; } crm_log_xml_trace(update, __FUNCTION__); /* make it an asynchronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down then anyway? * (probably because of an internal error) * * Worst case: * we get shot for having resources "running" that really weren't * * the alternative however means blocking here for too long, which * isn't acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL); if (rc > 0) { last_resource_update = rc; } done: /* the return code is a call number, not an error code */ crm_trace("Sent resource state update message: %d for %s=%u on %s", rc, op->op_type, op->interval_ms, op->rsc_id); fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } static char * unescape_newlines(const char *string) { char *pch = NULL; char *ret = NULL; static const char *escaped_newline = "\\n"; if (!string) { return NULL; } ret = strdup(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* Replace newline escape pattern with actual newline (and a space so we * don't have to shuffle the rest of the buffer) */ pch[0] = '\n'; pch[1] = ' '; pch = strstr(pch, escaped_newline); } return ret; } static bool did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id, const char * op_type, guint interval_ms) { rsc_history_t *entry = NULL; CRM_CHECK(lrm_state != NULL, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); CRM_CHECK(op_type != NULL, return FALSE); entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->failed == NULL) { return FALSE; } if (crm_str_eq(entry->failed->rsc_id, rsc_id, TRUE) && safe_str_eq(entry->failed->op_type, op_type) && entry->failed->interval_ms == interval_ms) { return TRUE; } return FALSE; } void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, active_op_t *pending, xmlNode *action_xml) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; gboolean remove = FALSE; gboolean removed = FALSE; bool need_direct_ack = FALSE; lrmd_rsc_info_t *rsc = NULL; const char *node_name = NULL; CRM_CHECK(op != NULL, return); CRM_CHECK(op->rsc_id != NULL, return); // Remap new status codes for older DCs if (compare_version(fsa_our_dc_version, "3.2.0") < 0) { switch (op->op_status) { case PCMK_LRM_OP_NOT_CONNECTED: op->op_status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_CONNECTION_DIED; break; case PCMK_LRM_OP_INVALID: op->op_status = PCMK_LRM_OP_ERROR; op->rc = CRM_DIRECT_NACK_RC; break; default: break; } } op_id = make_stop_id(op->rsc_id, op->call_id); op_key = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms); // Get resource info if available (from executor 
state or action XML) if (lrm_state) { rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); } if ((rsc == NULL) && action_xml) { xmlNode *xml = find_xml_node(action_xml, XML_CIB_TAG_RESOURCE, TRUE); const char *standard = crm_element_value(xml, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER); const char *type = crm_element_value(xml, XML_ATTR_TYPE); if (standard && type) { crm_info("%s agent information not cached, using %s%s%s:%s from action XML", op->rsc_id, standard, (provider? ":" : ""), (provider? provider : ""), type); rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type); } else { crm_err("Can't process %s result because %s agent information not cached or in XML", op_key, op->rsc_id); } } // Get node name if available (from executor state or action XML) if (lrm_state) { node_name = lrm_state->node_name; } else if (action_xml) { node_name = crm_element_value(action_xml, XML_LRM_ATTR_TARGET); } if(pending == NULL) { remove = TRUE; if (lrm_state) { pending = g_hash_table_lookup(lrm_state->pending_ops, op_id); } } if (op->op_status == PCMK_LRM_OP_ERROR) { switch(op->rc) { case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: // Leave it to the TE/scheduler to decide if this is an error op->op_status = PCMK_LRM_OP_DONE; break; default: /* Nothing to do */ break; } } if (op->op_status != PCMK_LRM_OP_CANCELLED) { /* We might not record the result, so directly acknowledge it to the * originator instead, so it doesn't time out waiting for the result * (especially important if part of a transition). */ need_direct_ack = TRUE; if (controld_action_is_recordable(op->op_type)) { if (node_name && rsc) { // We should record the result, and happily, we can update_id = do_update_resource(node_name, rsc, op, pending? pending->lock_time : 0); need_direct_ack = FALSE; } else if (op->rsc_deleted) { /* We shouldn't record the result (likely the resource was * refreshed, cleaned, or removed while this operation was * in flight). */ crm_notice("Not recording %s result in CIB because " "resource information was removed since it was initiated", op_key); } else { /* This shouldn't be possible; the executor didn't consider the * resource deleted, but we couldn't find resource or node * information. */ crm_err("Unable to record %s result in CIB: %s", op_key, (node_name? "No resource information" : "No node name")); } } } else if (op->interval_ms == 0) { /* A non-recurring operation was cancelled. Most likely, the * never-initiated action was removed from the executor's pending * operations list upon resource removal. */ need_direct_ack = TRUE; } else if (pending == NULL) { /* This recurring operation was cancelled, but was not pending. No * transition actions are waiting on it, nothing needs to be done. */ } else if (op->user_data == NULL) { /* This recurring operation was cancelled and pending, but we don't * have a transition key. This should never happen. */ crm_err("Recurring operation %s was cancelled without transition information", op_key); } else if (is_set(pending->flags, active_op_remove)) { /* This recurring operation was cancelled (by us) and pending, and we * have been waiting for it to finish. */ if (lrm_state) { erase_lrm_history_by_op(lrm_state, op); } /* If the recurring operation had failed, the lrm_rsc_op is recorded as * "last_failure" which won't get erased from the cib given the logic on * purpose in erase_lrm_history_by_op(). 
So that the cancel action won't * have a chance to get confirmed by DC with process_op_deletion(). * Cluster transition would get stuck waiting for the remaining action * timer to time out. * * Directly acknowledge the cancel operation in this case. */ if (did_lrm_rsc_op_fail(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms)) { need_direct_ack = TRUE; } } else if (op->rsc_deleted) { /* This recurring operation was cancelled (but not by us, and the * executor does not have resource information, likely due to resource * cleanup, refresh, or removal) and pending. */ crm_debug("Recurring op %s was cancelled due to resource deletion", op_key); need_direct_ack = TRUE; } else { /* This recurring operation was cancelled (but not by us, likely by the * executor before stopping the resource) and pending. We don't need to * do anything special. */ } if (need_direct_ack) { controld_ack_event_directly(NULL, NULL, NULL, op, op->rsc_id); } if(remove == FALSE) { /* The caller will do this afterwards, but keep the logging consistent */ removed = TRUE; } else if (lrm_state && ((op->interval_ms == 0) || (op->op_status == PCMK_LRM_OP_CANCELLED))) { gboolean found = g_hash_table_remove(lrm_state->pending_ops, op_id); if (op->interval_ms != 0) { removed = TRUE; } else if (found) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops)); } } if (node_name == NULL) { node_name = "unknown node"; // for logging } switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: crm_info("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? "true" : "false")); break; case PCMK_LRM_OP_DONE: - crm_notice("Result of %s operation for %s on %s: %d (%s) " - CRM_XS " call=%d key=%s confirmed=%s cib-update=%d", + crm_notice("Result of %s operation for %s on %s: %s " + CRM_XS " rc=%d call=%d key=%s confirmed=%s cib-update=%d", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, - op->rc, services_ocf_exitcode_str(op->rc), + services_ocf_exitcode_str(op->rc), op->rc, op->call_id, op_key, (removed? "true" : "false"), update_id); break; case PCMK_LRM_OP_TIMEOUT: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s timeout=%dms", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, op->timeout); break; default: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s status=%d cib-update=%d", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? 
"true" : "false"), op->op_status, update_id); } if (op->output) { char *prefix = crm_strdup_printf("%s-" PCMK__OP_FMT ":%d", node_name, op->rsc_id, op->op_type, op->interval_ms, op->call_id); if (op->rc) { crm_log_output(LOG_NOTICE, prefix, op->output); } else { crm_log_output(LOG_DEBUG, prefix, op->output); } free(prefix); } if (lrm_state) { if (safe_str_neq(op->op_type, RSC_METADATA)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (rsc && (op->rc == PCMK_OCF_OK)) { char *metadata = unescape_newlines(op->output); metadata_cache_update(lrm_state->metadata_cache, rsc, metadata); free(metadata); } } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); if (lrm_state) { delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); } } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... allow it to continue */ mainloop_set_trigger(fsa_source); if (lrm_state && rsc) { update_history_cache(lrm_state, rsc, op); } lrmd_free_rsc_info(rsc); free(op_key); free(op_id); } diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c index 97fd4ad928..e871326426 100644 --- a/daemons/controld/controld_fencing.c +++ b/daemons/controld/controld_fencing.c @@ -1,958 +1,957 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include static void tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event); /* * stonith failure counting * * We don't want to get stuck in a permanent fencing loop. Keep track of the * number of fencing failures for each target node, and the most we'll restart a * transition for. */ struct st_fail_rec { int count; }; static bool fence_reaction_panic = FALSE; static unsigned long int stonith_max_attempts = 10; static GHashTable *stonith_failures = NULL; // crmd_opts defines default for stonith-max-attempts, so value is never NULL void update_stonith_max_attempts(const char *value) { if (safe_str_eq(value, CRM_INFINITY_S)) { stonith_max_attempts = CRM_SCORE_INFINITY; } else { stonith_max_attempts = (unsigned long int) crm_parse_ll(value, NULL); } } void set_fence_reaction(const char *reaction_s) { if (safe_str_eq(reaction_s, "panic")) { fence_reaction_panic = TRUE; } else { if (safe_str_neq(reaction_s, "stop")) { crm_warn("Invalid value '%s' for %s, using 'stop'", reaction_s, XML_CONFIG_ATTR_FENCE_REACTION); } fence_reaction_panic = FALSE; } } static gboolean too_many_st_failures(const char *target) { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *value = NULL; if (stonith_failures == NULL) { return FALSE; } if (target == NULL) { g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { if (value->count >= stonith_max_attempts) { target = (const char*)key; goto too_many; } } } else { value = g_hash_table_lookup(stonith_failures, target); if ((value != NULL) && (value->count >= stonith_max_attempts)) { goto too_many; } } return FALSE; too_many: crm_warn("Too many failures (%d) to fence %s, giving up", value->count, target); return TRUE; } /*! 
* \internal * \brief Reset a stonith fail count * * \param[in] target Name of node to reset, or NULL for all */ void st_fail_count_reset(const char *target) { if (stonith_failures == NULL) { return; } if (target) { struct st_fail_rec *rec = NULL; rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count = 0; } } else { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *rec = NULL; g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rec)) { rec->count = 0; } } } static void st_fail_count_increment(const char *target) { struct st_fail_rec *rec = NULL; if (stonith_failures == NULL) { stonith_failures = crm_str_table_new(); } rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count++; } else { rec = malloc(sizeof(struct st_fail_rec)); if(rec == NULL) { return; } rec->count = 1; g_hash_table_insert(stonith_failures, strdup(target), rec); } } /* end stonith fail count functions */ static void cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if (rc < pcmk_ok) { crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char *)user_data, pcmk_strerror(rc), rc); crm_log_xml_warn(msg, "Failed update"); abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); } } static void send_stonith_update(crm_action_t *action, const char *target, const char *uuid) { int rc = pcmk_ok; crm_node_t *peer = NULL; /* We (usually) rely on the membership layer to do node_update_cluster, * and the peer status callback to do node_update_peer, because the node * might have already rejoined before we get the stonith result here. */ int flags = node_update_join | node_update_expected; /* zero out the node-status & remove all LRM status info */ xmlNode *node_state = NULL; CRM_CHECK(target != NULL, return); CRM_CHECK(uuid != NULL, return); /* Make sure the membership and join caches are accurate */ peer = crm_get_peer_full(0, target, CRM_GET_PEER_ANY); CRM_CHECK(peer != NULL, return); if (peer->state == NULL) { /* Usually, we rely on the membership layer to update the cluster state * in the CIB. However, if the node has never been seen, do it here, so * the node is not considered unclean. */ flags |= node_update_cluster; } if (peer->uuid == NULL) { crm_info("Recording uuid '%s' for node '%s'", uuid, target); peer->uuid = strdup(uuid); } crmd_peer_down(peer, TRUE); /* Generate a node state update for the CIB */ node_state = create_node_state_update(peer, flags, NULL, __FUNCTION__); /* we have to mark whether or not remote nodes have already been fenced */ if (peer->flags & crm_remote_node) { time_t now = time(NULL); char *now_s = crm_itoa(now); crm_xml_add(node_state, XML_NODE_IS_FENCED, now_s); free(now_s); } /* Force our known ID */ crm_xml_add(node_state, XML_ATTR_UUID, uuid); rc = fsa_cib_conn->cmds->update(fsa_cib_conn, XML_CIB_TAG_STATUS, node_state, cib_quorum_override | cib_scope_local | cib_can_create); /* Delay processing the trigger until the update completes */ crm_debug("Sending fencing update %d for %s", rc, target); fsa_register_cib_callback(rc, FALSE, strdup(target), cib_fencing_updated); /* Make sure it sticks */ /* fsa_cib_conn->cmds->bump_epoch(fsa_cib_conn, cib_quorum_override|cib_scope_local); */ controld_delete_node_state(peer->uname, controld_section_all, cib_scope_local); free_xml(node_state); return; } /*! 
 * \internal
 * \brief Abort transition due to stonith failure
 *
 * \param[in] abort_action Whether to restart or stop transition
 * \param[in] target Don't restart if this (NULL for any) has too many failures
 * \param[in] reason Log this stonith action XML as abort reason (or NULL)
 */
static void
abort_for_stonith_failure(enum transition_action abort_action,
                          const char *target, xmlNode *reason)
{
    /* If stonith repeatedly fails, we eventually give up on starting a new
     * transition for that reason.
     */
    if ((abort_action != tg_stop) && too_many_st_failures(target)) {
        abort_action = tg_stop;
    }
    abort_transition(INFINITY, abort_action, "Stonith failed", reason);
}


/*
 * stonith cleanup list
 *
 * If the DC is shot, proper notifications might not go out.
 * The stonith cleanup list allows the cluster to (re-)send
 * notifications once a new DC is elected.
 */

static GListPtr stonith_cleanup_list = NULL;

/*!
 * \internal
 * \brief Add a node to the stonith cleanup list
 *
 * \param[in] target Name of node to add
 */
void
add_stonith_cleanup(const char *target)
{
    stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target));
}

/*!
 * \internal
 * \brief Remove a node from the stonith cleanup list
 *
 * \param[in] target Name of node to remove
 */
void
remove_stonith_cleanup(const char *target)
{
    GListPtr iter = stonith_cleanup_list;

    while (iter != NULL) {
        GListPtr tmp = iter;
        char *iter_name = tmp->data;

        iter = iter->next;
        if (safe_str_eq(target, iter_name)) {
            crm_trace("Removing %s from the cleanup list", iter_name);
            stonith_cleanup_list = g_list_delete_link(stonith_cleanup_list, tmp);
            free(iter_name);
        }
    }
}

/*!
 * \internal
 * \brief Purge all entries from the stonith cleanup list
 */
void
purge_stonith_cleanup()
{
    if (stonith_cleanup_list) {
        GListPtr iter = NULL;

        for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) {
            char *target = iter->data;

            crm_info("Purging %s from stonith cleanup list", target);
            free(target);
        }
        g_list_free(stonith_cleanup_list);
        stonith_cleanup_list = NULL;
    }
}

/*!
* \internal * \brief Send stonith updates for all entries in cleanup list, then purge it */ void execute_stonith_cleanup() { GListPtr iter; for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; crm_node_t *target_node = crm_get_peer(0, target); const char *uuid = crm_peer_uuid(target_node); crm_notice("Marking %s, target of a previous stonith action, as clean", target); send_stonith_update(NULL, target, uuid); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; } /* end stonith cleanup list functions */ /* stonith API client * * Functions that need to interact directly with the fencer via its API */ static stonith_t *stonith_api = NULL; static crm_trigger_t *stonith_reconnect = NULL; static char *te_client_id = NULL; static gboolean fail_incompletable_stonith(crm_graph_t *graph) { GListPtr lpc = NULL; const char *task = NULL; xmlNode *last_action = NULL; if (graph == NULL) { return FALSE; } for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { GListPtr lpc2 = NULL; synapse_t *synapse = (synapse_t *) lpc->data; if (synapse->confirmed) { continue; } for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) { crm_action_t *action = (crm_action_t *) lpc2->data; if (action->type != action_type_crm || action->confirmed) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (task && safe_str_eq(task, CRM_OP_FENCE)) { action->failed = TRUE; last_action = action->xml; update_graph(graph, action); crm_notice("Failing action %d (%s): fencer terminated", action->id, ID(action->xml)); } } } if (last_action != NULL) { crm_warn("Fencer failure resulted in unrunnable actions"); abort_for_stonith_failure(tg_restart, NULL, last_action); return TRUE; } return FALSE; } static void tengine_stonith_connection_destroy(stonith_t *st, stonith_event_t *e) { te_cleanup_stonith_history_sync(st, FALSE); if (is_set(fsa_input_register, R_ST_REQUIRED)) { crm_crit("Fencing daemon connection failed"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Fencing daemon disconnected"); } if (stonith_api) { /* the client API won't properly reconnect notifications * if they are still in the table - so remove them */ if (stonith_api->state != stonith_disconnected) { stonith_api->cmds->disconnect(st); } stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT); stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE); stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED); } if (AM_I_DC) { fail_incompletable_stonith(transition_graph); trigger_graph(); } } static void tengine_stonith_notify(stonith_t *st, stonith_event_t *st_event) { if (te_client_id == NULL) { te_client_id = crm_strdup_printf("%s.%lu", crm_system_name, (unsigned long) getpid()); } if (st_event == NULL) { crm_err("Notify data not found"); return; } crmd_alert_fencing_op(st_event); if ((st_event->result == pcmk_ok) && safe_str_eq("on", st_event->action)) { crm_notice("%s was successfully unfenced by %s (at the request of %s)", st_event->target, st_event->executioner? st_event->executioner : "", st_event->origin); /* TODO: Hook up st_event->device */ return; } else if (safe_str_eq("on", st_event->action)) { crm_err("Unfencing of %s by %s failed: %s (%d)", st_event->target, st_event->executioner? 
st_event->executioner : "", pcmk_strerror(st_event->result), st_event->result); return; } else if ((st_event->result == pcmk_ok) && crm_str_eq(st_event->target, fsa_our_uname, TRUE)) { /* We were notified of our own fencing. Most likely, either fencing was * misconfigured, or fabric fencing that doesn't cut cluster * communication is in use. * * Either way, shutting down the local host is a good idea, to require * administrator intervention. Also, other nodes would otherwise likely * set our status to lost because of the fencing callback and discard * our subsequent election votes as "not part of our cluster". */ crm_crit("We were allegedly just fenced by %s for %s!", st_event->executioner? st_event->executioner : "the cluster", st_event->origin); /* Dumps blackbox if enabled */ if (fence_reaction_panic) { pcmk_panic(__FUNCTION__); } else { crm_exit(CRM_EX_FATAL); } return; } /* Update the count of stonith failures for this target, in case we become * DC later. The current DC has already updated its fail count in * tengine_stonith_callback(). */ if (!AM_I_DC && safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) { if (st_event->result == pcmk_ok) { st_fail_count_reset(st_event->target); } else { st_fail_count_increment(st_event->target); } } crm_notice("Peer %s was%s terminated (%s) by %s on behalf of %s: %s " CRM_XS " initiator=%s ref=%s", st_event->target, st_event->result == pcmk_ok ? "" : " not", st_event->action, st_event->executioner ? st_event->executioner : "", (st_event->client_origin? st_event->client_origin : ""), pcmk_strerror(st_event->result), st_event->origin, st_event->id); if (st_event->result == pcmk_ok) { crm_node_t *peer = crm_find_known_peer_full(0, st_event->target, CRM_GET_PEER_ANY); const char *uuid = NULL; gboolean we_are_executioner = safe_str_eq(st_event->executioner, fsa_our_uname); if (peer == NULL) { return; } uuid = crm_peer_uuid(peer); crm_trace("target=%s dc=%s", st_event->target, fsa_our_dc); if(AM_I_DC) { /* The DC always sends updates */ send_stonith_update(NULL, st_event->target, uuid); /* @TODO Ideally, at this point, we'd check whether the fenced node * hosted any guest nodes, and call remote_node_down() for them. * Unfortunately, the controller doesn't have a simple, reliable way * to map hosts to guests. It might be possible to track this in the * peer cache via crm_remote_peer_cache_refresh(). For now, we rely * on the scheduler creating fence pseudo-events for the guests. */ if (st_event->client_origin && safe_str_neq(st_event->client_origin, te_client_id)) { /* Abort the current transition graph if it wasn't us * that invoked stonith to fence someone */ crm_info("External fencing operation from %s fenced %s", st_event->client_origin, st_event->target); abort_transition(INFINITY, tg_restart, "External Fencing Operation", NULL); } /* Assume it was our leader if we don't currently have one */ } else if (((fsa_our_dc == NULL) || safe_str_eq(fsa_our_dc, st_event->target)) && is_not_set(peer->flags, crm_remote_node)) { - crm_notice("Target %s our leader %s (recorded: %s)", - fsa_our_dc ? "was" : "may have been", st_event->target, - fsa_our_dc ? fsa_our_dc : ""); + crm_notice("Fencing target %s %s our leader", + st_event->target, (fsa_our_dc? 
"was" : "may have been")); /* Given the CIB resyncing that occurs around elections, * have one node update the CIB now and, if the new DC is different, * have them do so too after the election */ if (we_are_executioner) { send_stonith_update(NULL, st_event->target, uuid); } add_stonith_cleanup(st_event->target); } /* If the target is a remote node, and we host its connection, * immediately fail all monitors so it can be recovered quickly. * The connection won't necessarily drop when a remote node is fenced, * so the failure might not otherwise be detected until the next poke. */ if (is_set(peer->flags, crm_remote_node)) { remote_ra_fail(st_event->target); } crmd_peer_down(peer, TRUE); } } /*! * \brief Connect to fencer * * \param[in] user_data If NULL, retry failures now, otherwise retry in main loop * * \return TRUE * \note If user_data is NULL, this will wait 2s between attempts, for up to * 30 attempts, meaning the controller could be blocked as long as 58s. */ static gboolean te_connect_stonith(gpointer user_data) { int rc = pcmk_ok; if (stonith_api == NULL) { stonith_api = stonith_api_new(); if (stonith_api == NULL) { crm_err("Could not connect to fencer: API memory allocation failed"); return TRUE; } } if (stonith_api->state != stonith_disconnected) { crm_trace("Already connected to fencer, no need to retry"); return TRUE; } if (user_data == NULL) { // Blocking (retry failures now until successful) rc = stonith_api_connect_retry(stonith_api, crm_system_name, 30); if (rc != pcmk_ok) { crm_err("Could not connect to fencer in 30 attempts: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } } else { // Non-blocking (retry failures later in main loop) rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL); if (rc != pcmk_ok) { if (is_set(fsa_input_register, R_ST_REQUIRED)) { - crm_err("Fencer connection failed (will retry): %s " - CRM_XS " rc=%d", pcmk_strerror(rc), rc); + crm_notice("Fencer connection failed (will retry): %s " + CRM_XS " rc=%d", pcmk_strerror(rc), rc); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Fencer connection failed (ignoring because no longer required): %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } return TRUE; } } if (rc == pcmk_ok) { stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT, tengine_stonith_connection_destroy); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE, tengine_stonith_notify); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED, tengine_stonith_history_synced); te_trigger_stonith_history_sync(TRUE); crm_notice("Fencer successfully connected"); } return TRUE; } /*! 
\internal \brief Schedule fencer connection attempt in main loop */ void controld_trigger_fencer_connect() { if (stonith_reconnect == NULL) { stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW, te_connect_stonith, GINT_TO_POINTER(TRUE)); } set_bit(fsa_input_register, R_ST_REQUIRED); mainloop_set_trigger(stonith_reconnect); } void controld_disconnect_fencer(bool destroy) { if (stonith_api) { // Prevent fencer connection from coming up again clear_bit(fsa_input_register, R_ST_REQUIRED); if (stonith_api->state != stonith_disconnected) { stonith_api->cmds->disconnect(stonith_api); } stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT); stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_FENCE); stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_HISTORY_SYNCED); } if (destroy) { if (stonith_api) { stonith_api->cmds->free(stonith_api); stonith_api = NULL; } if (stonith_reconnect) { mainloop_destroy_trigger(stonith_reconnect); stonith_reconnect = NULL; } if (te_client_id) { free(te_client_id); te_client_id = NULL; } } } static gboolean do_stonith_history_sync(gpointer user_data) { if (stonith_api && (stonith_api->state != stonith_disconnected)) { stonith_history_t *history = NULL; te_cleanup_stonith_history_sync(stonith_api, FALSE); stonith_api->cmds->history(stonith_api, st_opt_sync_call | st_opt_broadcast, NULL, &history, 5); stonith_history_free(history); return TRUE; } else { crm_info("Skip triggering stonith history-sync as stonith is disconnected"); return FALSE; } } static void tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data) { char *uuid = NULL; int stonith_id = -1; int transition_id = -1; crm_action_t *action = NULL; int call_id = data->call_id; int rc = data->rc; char *userdata = data->userdata; CRM_CHECK(userdata != NULL, return); crm_notice("Stonith operation %d/%s: %s (%d)", call_id, (char *)userdata, pcmk_strerror(rc), rc); if (AM_I_DC == FALSE) { return; } /* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */ /* op->call_id, op->optype, op->node_name, op->op_result, */ /* (char *)op->node_list, op->private_data); */ /* filter out old STONITH actions */ CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, NULL), goto bail); if (transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid) || transition_graph->id != transition_id) { crm_info("Ignoring STONITH action initiated outside of the current transition"); goto bail; } action = controld_get_action(stonith_id); if (action == NULL) { crm_err("Stonith action not matched"); goto bail; } stop_te_timer(action->timer); if (rc == pcmk_ok) { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); const char *op = crm_meta_value(action->params, "stonith_action"); crm_info("Stonith operation %d for %s passed", call_id, target); if (action->confirmed == FALSE) { te_action_confirmed(action, NULL); if (safe_str_eq("on", op)) { const char *value = NULL; char *now = crm_ttoa(time(NULL)); update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, FALSE); free(now); value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL); update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, FALSE); value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE); update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, FALSE); } else if (action->sent_update == FALSE) { send_stonith_update(action, 
target, uuid); action->sent_update = TRUE; } } st_fail_count_reset(target); } else { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); enum transition_action abort_action = tg_restart; action->failed = TRUE; crm_notice("Stonith operation %d for %s failed (%s): aborting transition.", call_id, target, pcmk_strerror(rc)); /* If no fence devices were available, there's no use in immediately * checking again, so don't start a new transition in that case. */ if (rc == -ENODEV) { crm_warn("No devices found in cluster to fence %s, giving up", target); abort_action = tg_stop; } /* Increment the fail count now, so abort_for_stonith_failure() can * check it. Non-DC nodes will increment it in tengine_stonith_notify(). */ st_fail_count_increment(target); abort_for_stonith_failure(abort_action, target, NULL); } update_graph(transition_graph, action); trigger_graph(); bail: free(userdata); free(uuid); return; } gboolean te_fence_node(crm_graph_t *graph, crm_action_t *action) { int rc = 0; const char *id = NULL; const char *uuid = NULL; const char *target = NULL; const char *type = NULL; char *transition_key = NULL; const char *priority_delay = NULL; gboolean invalid_action = FALSE; enum stonith_call_options options = st_opt_none; id = ID(action->xml); target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); type = crm_meta_value(action->params, "stonith_action"); CRM_CHECK(id != NULL, invalid_action = TRUE); CRM_CHECK(uuid != NULL, invalid_action = TRUE); CRM_CHECK(type != NULL, invalid_action = TRUE); CRM_CHECK(target != NULL, invalid_action = TRUE); if (invalid_action) { crm_log_xml_warn(action->xml, "BadAction"); return FALSE; } priority_delay = crm_meta_value(action->params, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY); crm_notice("Requesting fencing (%s) of node %s " CRM_XS " action=%s timeout=%u priority_delay=%s", type, target, id, transition_graph->stonith_timeout, priority_delay); /* Passing NULL means block until we can connect... */ te_connect_stonith(NULL); if (crmd_join_phase_count(crm_join_confirmed) == 1) { options |= st_opt_allow_suicide; } rc = stonith_api->cmds->fence_with_delay(stonith_api, options, target, type, (int) (transition_graph->stonith_timeout / 1000), 0, crm_atoi(priority_delay, "-1")); transition_key = pcmk__transition_key(transition_graph->id, action->id, 0, te_uuid), stonith_api->cmds->register_callback(stonith_api, rc, (int) (transition_graph->stonith_timeout / 1000), st_opt_timeout_updates, transition_key, "tengine_stonith_callback", tengine_stonith_callback); return TRUE; } /* end stonith API client functions */ /* * stonith history synchronization * * Each node's fencer keeps track of a cluster-wide fencing history. When a node * joins or leaves, we need to synchronize the history across all nodes. 
*/ static crm_trigger_t *stonith_history_sync_trigger = NULL; static mainloop_timer_t *stonith_history_sync_timer_short = NULL; static mainloop_timer_t *stonith_history_sync_timer_long = NULL; void te_cleanup_stonith_history_sync(stonith_t *st, bool free_timers) { if (free_timers) { mainloop_timer_del(stonith_history_sync_timer_short); stonith_history_sync_timer_short = NULL; mainloop_timer_del(stonith_history_sync_timer_long); stonith_history_sync_timer_long = NULL; } else { mainloop_timer_stop(stonith_history_sync_timer_short); mainloop_timer_stop(stonith_history_sync_timer_long); } if (st) { st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY_SYNCED); } } static void tengine_stonith_history_synced(stonith_t *st, stonith_event_t *st_event) { te_cleanup_stonith_history_sync(st, FALSE); crm_debug("Fence-history synced - cancel all timers"); } static gboolean stonith_history_sync_set_trigger(gpointer user_data) { mainloop_set_trigger(stonith_history_sync_trigger); return FALSE; } void te_trigger_stonith_history_sync(bool long_timeout) { /* trigger a sync in 5s to give more nodes the * chance to show up so that we don't create * unnecessary stonith-history-sync traffic * * the long timeout of 30s is there as a fallback * so that after a successful connection to fenced * we will wait for 30s for the DC to trigger a * history-sync * if this doesn't happen we trigger a sync locally * (e.g. fenced segfaults and is restarted by pacemakerd) */ /* as we are finally checking the stonith-connection * in do_stonith_history_sync we should be fine * leaving stonith_history_sync_time & stonith_history_sync_trigger * around */ if (stonith_history_sync_trigger == NULL) { stonith_history_sync_trigger = mainloop_add_trigger(G_PRIORITY_LOW, do_stonith_history_sync, NULL); } if (long_timeout) { if(stonith_history_sync_timer_long == NULL) { stonith_history_sync_timer_long = mainloop_timer_add("history_sync_long", 30000, FALSE, stonith_history_sync_set_trigger, NULL); } crm_info("Fence history will be synchronized cluster-wide within 30 seconds"); mainloop_timer_start(stonith_history_sync_timer_long); } else { if(stonith_history_sync_timer_short == NULL) { stonith_history_sync_timer_short = mainloop_timer_add("history_sync_short", 5000, FALSE, stonith_history_sync_set_trigger, NULL); } crm_info("Fence history will be synchronized cluster-wide within 5 seconds"); mainloop_timer_start(stonith_history_sync_timer_short); } } /* end stonith history synchronization functions */ diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c index ae9257aa38..4d0e457be7 100644 --- a/daemons/execd/execd_commands.c +++ b/daemons/execd/execd_commands.c @@ -1,1896 +1,1897 @@ /* * Copyright 2012-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include // Check whether we have a high-resolution monotonic clock #undef PCMK__TIME_USE_CGT #if HAVE_DECL_CLOCK_MONOTONIC && defined(CLOCK_MONOTONIC) \ && !defined(PCMK_TIME_EMERGENCY_CGT) # define PCMK__TIME_USE_CGT # include /* clock_gettime */ #endif #include #include #include #include #include #include #include #include "pacemaker-execd.h" #define EXIT_REASON_MAX_LEN 128 GHashTable *rsc_list = NULL; typedef struct lrmd_cmd_s { int timeout; guint interval_ms; int start_delay; int timeout_orig; int call_id; int exec_rc; int lrmd_op_status; int call_opts; /* Timer ids, must be removed on cmd destruction. */ int delay_id; int stonith_recurring_id; int rsc_deleted; int service_flags; char *client_id; char *origin; char *rsc_id; char *action; char *real_action; char *exit_reason; char *output; char *userdata_str; /* We can track operation queue time and run time, to be saved with the CIB * resource history (and displayed in cluster status). We need * high-resolution monotonic time for this purpose, so we use * clock_gettime(CLOCK_MONOTONIC, ...) (if available, otherwise this feature * is disabled). * * However, we also need epoch timestamps for recording the time the command * last ran and the time its return value last changed, for use in time * displays (as opposed to interval calculations). We keep time_t values for * this purpose. * * The last run time is used for both purposes, so we keep redundant * monotonic and epoch values for this. Technically the two could represent * different times, but since time_t has only second resolution and the * values are used for distinct purposes, that is not significant. */ #ifdef PCMK__TIME_USE_CGT /* Recurring and systemd operations may involve more than one executor * command per operation, so they need info about the original and the most * recent. */ struct timespec t_first_run; // When op first ran struct timespec t_run; // When op most recently ran struct timespec t_first_queue; // When op was first queued struct timespec t_queue; // When op was most recently queued #endif time_t epoch_last_run; // Epoch timestamp of when op last ran time_t epoch_rcchange; // Epoch timestamp of when rc last changed int first_notify_sent; int last_notify_rc; int last_notify_op_status; int last_pid; GHashTable *params; } lrmd_cmd_t; static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc); static gboolean lrmd_rsc_dispatch(gpointer user_data); static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id); #ifdef PCMK__TIME_USE_CGT /*! * \internal * \brief Check whether a struct timespec has been set * * \param[in] timespec Time to check * * \return true if timespec has been set (i.e. is nonzero), false otherwise */ static inline bool time_is_set(struct timespec *timespec) { return (timespec != NULL) && ((timespec->tv_sec != 0) || (timespec->tv_nsec != 0)); } /* * \internal * \brief Set a timespec (and its original if unset) to the current time * * \param[out] t_current Where to store current time * \param[out] t_orig Where to copy t_current if unset */ static void get_current_time(struct timespec *t_current, struct timespec *t_orig) { clock_gettime(CLOCK_MONOTONIC, t_current); if ((t_orig != NULL) && !time_is_set(t_orig)) { *t_orig = *t_current; } } /*! 
 * \internal
 * \brief Return difference between two times in milliseconds
 *
 * \param[in] now More recent time (or NULL to use current time)
 * \param[in] old Earlier time
 *
 * \return milliseconds difference (or 0 if old is NULL or unset)
 *
 * \note Can overflow on 32bit machines when the difference is around
 *       24 days or more.
 */
static int
time_diff_ms(struct timespec *now, struct timespec *old)
{
    int diff_ms = 0;

    if (time_is_set(old)) {
        struct timespec local_now = { 0, };

        if (now == NULL) {
            clock_gettime(CLOCK_MONOTONIC, &local_now);
            now = &local_now;
        }
        diff_ms = (now->tv_sec - old->tv_sec) * 1000
                  + (now->tv_nsec - old->tv_nsec) / 1000000;
    }
    return diff_ms;
}

/*!
 * \internal
 * \brief Reset a command's operation times to their original values.
 *
 * Reset a command's run and queued timestamps to the timestamps of the original
 * command, so we report the entire time since then and not just the time since
 * the most recent command (for recurring and systemd operations).
 *
 * \param[in] cmd Executor command object to reset
 *
 * \note It's not obvious what the queued time should be for a systemd
 *       start/stop operation, which might go like this:
 *         initial command queued 5ms, runs 3s
 *         monitor command queued 10ms, runs 10s
 *         monitor command queued 10ms, runs 10s
 *       Is the queued time for that operation 5ms, 10ms or 25ms? The current
 *       implementation will report 5ms. If it's 25ms, then we need to
 *       subtract 20ms from the total exec time so as not to count it twice.
 *       We can implement that later if it matters to anyone ...
 */
static void
cmd_original_times(lrmd_cmd_t * cmd)
{
    cmd->t_run = cmd->t_first_run;
    cmd->t_queue = cmd->t_first_queue;
}
#endif

static void
log_finished(lrmd_cmd_t * cmd, int exec_time, int queue_time)
{
    char pid_str[32] = { 0, };
    int log_level = LOG_INFO;

    if (cmd->last_pid) {
        snprintf(pid_str, 32, "%d", cmd->last_pid);
    }
    if (safe_str_eq(cmd->action, "monitor")) {
        log_level = LOG_DEBUG;
    }

-    do_crm_log(log_level, "%s %s (call %d%s%s) exited with status %d"
 #ifdef PCMK__TIME_USE_CGT
+    do_crm_log(log_level, "%s %s (call %d%s%s) exited with status %d"
                " (execution time %dms, queue time %dms)",
-#endif
                cmd->rsc_id, cmd->action, cmd->call_id,
-               (cmd->last_pid? ", PID " : ""), pid_str, cmd->exec_rc
-#ifdef PCMK__TIME_USE_CGT
-               , exec_time, queue_time
+               (cmd->last_pid? ", PID " : ""), pid_str, cmd->exec_rc,
+               exec_time, queue_time);
+#else
+    do_crm_log(log_level, "%s %s (call %d%s%s) exited with status %d",
+               cmd->rsc_id, cmd->action, cmd->call_id,
+               (cmd->last_pid?
", PID " : ""), pid_str, cmd->exec_rc); #endif - ); } static void log_execute(lrmd_cmd_t * cmd) { int log_level = LOG_INFO; if (safe_str_eq(cmd->action, "monitor")) { log_level = LOG_DEBUG; } do_crm_log(log_level, "executing - rsc:%s action:%s call_id:%d", cmd->rsc_id, cmd->action, cmd->call_id); } static const char * normalize_action_name(lrmd_rsc_t * rsc, const char *action) { if (safe_str_eq(action, "monitor") && is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) { return "status"; } return action; } static lrmd_rsc_t * build_rsc_from_xml(xmlNode * msg) { xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); lrmd_rsc_t *rsc = NULL; rsc = calloc(1, sizeof(lrmd_rsc_t)); crm_element_value_int(msg, F_LRMD_CALLOPTS, &rsc->call_opts); rsc->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); rsc->class = crm_element_value_copy(rsc_xml, F_LRMD_CLASS); rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER); rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE); rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_rsc_dispatch, rsc); rsc->st_probe_rc = -ENODEV; // if stonith, initialize to "not running" return rsc; } static lrmd_cmd_t * create_lrmd_cmd(xmlNode *msg, pcmk__client_t *client) { int call_options = 0; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); lrmd_cmd_t *cmd = NULL; cmd = calloc(1, sizeof(lrmd_cmd_t)); crm_element_value_int(msg, F_LRMD_CALLOPTS, &call_options); cmd->call_opts = call_options; cmd->client_id = strdup(client->id); crm_element_value_int(msg, F_LRMD_CALLID, &cmd->call_id); crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &cmd->interval_ms); crm_element_value_int(rsc_xml, F_LRMD_TIMEOUT, &cmd->timeout); crm_element_value_int(rsc_xml, F_LRMD_RSC_START_DELAY, &cmd->start_delay); cmd->timeout_orig = cmd->timeout; cmd->origin = crm_element_value_copy(rsc_xml, F_LRMD_ORIGIN); cmd->action = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ACTION); cmd->userdata_str = crm_element_value_copy(rsc_xml, F_LRMD_RSC_USERDATA_STR); cmd->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); cmd->params = xml2list(rsc_xml); if (safe_str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), "block")) { crm_debug("Setting flag to leave pid group on timeout and " "only kill action pid for " PCMK__OP_FMT, cmd->rsc_id, cmd->action, cmd->interval_ms); cmd->service_flags |= SVC_ACTION_LEAVE_GROUP; } return cmd; } static void stop_recurring_timer(lrmd_cmd_t *cmd) { if (cmd) { if (cmd->stonith_recurring_id) { g_source_remove(cmd->stonith_recurring_id); } cmd->stonith_recurring_id = 0; } } static void free_lrmd_cmd(lrmd_cmd_t * cmd) { stop_recurring_timer(cmd); if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->params) { g_hash_table_destroy(cmd->params); } free(cmd->origin); free(cmd->action); free(cmd->real_action); free(cmd->userdata_str); free(cmd->rsc_id); free(cmd->output); free(cmd->exit_reason); free(cmd->client_id); free(cmd); } static gboolean stonith_recurring_op_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc; cmd->stonith_recurring_id = 0; if (!cmd->rsc_id) { return FALSE; } rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); CRM_ASSERT(rsc != NULL); /* take it out of recurring_ops list, and put it in the pending ops * to be executed */ rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_queue), &(cmd->t_first_queue)); #endif mainloop_set_trigger(rsc->work); return FALSE; } 
static inline void start_recurring_timer(lrmd_cmd_t *cmd) { if (cmd && (cmd->interval_ms > 0)) { cmd->stonith_recurring_id = g_timeout_add(cmd->interval_ms, stonith_recurring_op_helper, cmd); } } static gboolean start_delay_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc = NULL; cmd->delay_id = 0; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc) { mainloop_set_trigger(rsc->work); } return FALSE; } static gboolean merge_recurring_duplicate(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { GListPtr gIter = NULL; lrmd_cmd_t * dup = NULL; gboolean dup_pending = FALSE; if (cmd->interval_ms == 0) { return 0; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { dup = gIter->data; if (safe_str_eq(cmd->action, dup->action) && (cmd->interval_ms == dup->interval_ms)) { dup_pending = TRUE; goto merge_dup; } } /* if dup is in recurring_ops list, that means it has already executed * and is in the interval loop. we can't just remove it in this case. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { dup = gIter->data; if (safe_str_eq(cmd->action, dup->action) && (cmd->interval_ms == dup->interval_ms)) { goto merge_dup; } } return FALSE; merge_dup: /* This should not occur. If it does, we need to investigate how something * like this is possible in the controller. */ crm_warn("Duplicate recurring op entry detected (" PCMK__OP_FMT "), merging with previous op entry", rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval_ms); /* merge */ dup->first_notify_sent = 0; free(dup->userdata_str); dup->userdata_str = cmd->userdata_str; cmd->userdata_str = NULL; dup->call_id = cmd->call_id; if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { /* if we are waiting for the next interval, kick it off now */ if (dup_pending == TRUE) { stop_recurring_timer(cmd); stonith_recurring_op_helper(cmd); } } else if (dup_pending == FALSE) { /* if we've already handed this to the service lib, kick off an early execution */ services_action_kick(rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval_ms); } free_lrmd_cmd(cmd); return TRUE; } static void schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { gboolean dup_processed = FALSE; CRM_CHECK(cmd != NULL, return); CRM_CHECK(rsc != NULL, return); crm_trace("Scheduling %s on %s", cmd->action, rsc->rsc_id); dup_processed = merge_recurring_duplicate(rsc, cmd); if (dup_processed) { /* duplicate recurring cmd found, cmds merged */ return; } /* The controller expects the executor to automatically cancel * recurring operations before a resource stops. 
*/ if (safe_str_eq(cmd->action, "stop")) { cancel_all_recurring(rsc, NULL); } rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_queue), &(cmd->t_first_queue)); #endif mainloop_set_trigger(rsc->work); if (cmd->start_delay) { cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } } static xmlNode * create_lrmd_reply(const char *origin, int rc, int call_id) { xmlNode *reply = create_xml_node(NULL, T_LRMD_REPLY); crm_xml_add(reply, F_LRMD_ORIGIN, origin); crm_xml_add_int(reply, F_LRMD_RC, rc); crm_xml_add_int(reply, F_LRMD_CALLID, call_id); return reply; } static void send_client_notify(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; pcmk__client_t *client = value; int rc; int log_level = LOG_WARNING; const char *msg = NULL; CRM_CHECK(client != NULL, return); if (client->name == NULL) { crm_trace("Skipping notification to client without name"); return; } rc = lrmd_server_send_notify(client, update_msg); if (rc == pcmk_rc_ok) { return; } switch (rc) { case ENOTCONN: case EPIPE: // Client exited without waiting for notification log_level = LOG_INFO; msg = "Disconnected"; break; default: msg = pcmk_rc_str(rc); break; } do_crm_log(log_level, "Could not notify client %s/%s: %s " CRM_XS " rc=%d", client->name, client->id, msg, rc); } static void send_cmd_complete_notify(lrmd_cmd_t * cmd) { xmlNode *notify = NULL; #ifdef PCMK__TIME_USE_CGT int exec_time = time_diff_ms(NULL, &(cmd->t_run)); int queue_time = time_diff_ms(&cmd->t_run, &(cmd->t_queue)); log_finished(cmd, exec_time, queue_time); #else log_finished(cmd, 0, 0); #endif /* If the first notify result for a cmd has already been sent earlier, and * the option to only send notifies on result changes is set, check whether * the last result is the same as the new one.
If so, suppress this update */ if (cmd->first_notify_sent && (cmd->call_opts & lrmd_opt_notify_changes_only)) { if (cmd->last_notify_rc == cmd->exec_rc && cmd->last_notify_op_status == cmd->lrmd_op_status) { /* only send changes */ return; } } cmd->first_notify_sent = 1; cmd->last_notify_rc = cmd->exec_rc; cmd->last_notify_op_status = cmd->lrmd_op_status; notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add_int(notify, F_LRMD_TIMEOUT, cmd->timeout); crm_xml_add_ms(notify, F_LRMD_RSC_INTERVAL, cmd->interval_ms); crm_xml_add_int(notify, F_LRMD_RSC_START_DELAY, cmd->start_delay); crm_xml_add_int(notify, F_LRMD_EXEC_RC, cmd->exec_rc); crm_xml_add_int(notify, F_LRMD_OP_STATUS, cmd->lrmd_op_status); crm_xml_add_int(notify, F_LRMD_CALLID, cmd->call_id); crm_xml_add_int(notify, F_LRMD_RSC_DELETED, cmd->rsc_deleted); crm_xml_add_ll(notify, F_LRMD_RSC_RUN_TIME, (long long) cmd->epoch_last_run); crm_xml_add_ll(notify, F_LRMD_RSC_RCCHANGE_TIME, (long long) cmd->epoch_rcchange); #ifdef PCMK__TIME_USE_CGT crm_xml_add_int(notify, F_LRMD_RSC_EXEC_TIME, exec_time); crm_xml_add_int(notify, F_LRMD_RSC_QUEUE_TIME, queue_time); #endif crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_RSC_EXEC); crm_xml_add(notify, F_LRMD_RSC_ID, cmd->rsc_id); if(cmd->real_action) { crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->real_action); } else { crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->action); } crm_xml_add(notify, F_LRMD_RSC_USERDATA_STR, cmd->userdata_str); crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->output); crm_xml_add(notify, F_LRMD_RSC_EXIT_REASON, cmd->exit_reason); if (cmd->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; xmlNode *args = create_xml_node(notify, XML_TAG_ATTRS); g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { hash2smartfield((gpointer) key, (gpointer) value, args); } } if (cmd->client_id && (cmd->call_opts & lrmd_opt_notify_orig_only)) { pcmk__client_t *client = pcmk__find_client_by_id(cmd->client_id); if (client) { send_client_notify(client->id, client, notify); } } else { pcmk__foreach_ipc_client(send_client_notify, notify); } free_xml(notify); } static void send_generic_notify(int rc, xmlNode * request) { if (pcmk__ipc_client_count() != 0) { int call_id = 0; xmlNode *notify = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); const char *op = crm_element_value(request, F_LRMD_OPERATION); crm_element_value_int(request, F_LRMD_CALLID, &call_id); notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add_int(notify, F_LRMD_RC, rc); crm_xml_add_int(notify, F_LRMD_CALLID, call_id); crm_xml_add(notify, F_LRMD_OPERATION, op); crm_xml_add(notify, F_LRMD_RSC_ID, rsc_id); pcmk__foreach_ipc_client(send_client_notify, notify); free_xml(notify); } } static void cmd_reset(lrmd_cmd_t * cmd) { cmd->lrmd_op_status = 0; cmd->last_pid = 0; #ifdef PCMK__TIME_USE_CGT memset(&cmd->t_run, 0, sizeof(cmd->t_run)); memset(&cmd->t_queue, 0, sizeof(cmd->t_queue)); #endif cmd->epoch_last_run = 0; free(cmd->exit_reason); cmd->exit_reason = NULL; free(cmd->output); cmd->output = NULL; } static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc) { crm_trace("Resource operation rsc:%s action:%s completed (%p %p)", cmd->rsc_id, cmd->action, rsc ? 
rsc->active : NULL, cmd); if (rsc && (rsc->active == cmd)) { rsc->active = NULL; mainloop_set_trigger(rsc->work); } if (!rsc) { cmd->rsc_deleted = 1; } /* reset original timeout so client notification has correct information */ cmd->timeout = cmd->timeout_orig; send_cmd_complete_notify(cmd); if (cmd->interval_ms && (cmd->lrmd_op_status == PCMK_LRM_OP_CANCELLED)) { if (rsc) { rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else if (cmd->interval_ms == 0) { if (rsc) { rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else { /* Clear all the values pertaining just to the last iteration of a recurring op. */ cmd_reset(cmd); } } static int ocf2uniform_rc(int rc) { if (rc < 0 || rc > PCMK_OCF_FAILED_MASTER) { return PCMK_OCF_UNKNOWN_ERROR; } return rc; } static int stonith2uniform_rc(const char *action, int rc) { switch (rc) { case pcmk_ok: rc = PCMK_OCF_OK; break; case -ENODEV: /* This should be possible only for probes in practice, but * interpret for all actions to be safe. */ if (safe_str_eq(action, "monitor")) { rc = PCMK_OCF_NOT_RUNNING; } else if (safe_str_eq(action, "stop")) { rc = PCMK_OCF_OK; } else { rc = PCMK_OCF_NOT_INSTALLED; } break; case -EOPNOTSUPP: rc = PCMK_OCF_UNIMPLEMENT_FEATURE; break; case -ETIME: case -ETIMEDOUT: rc = PCMK_OCF_TIMEOUT; break; default: rc = PCMK_OCF_UNKNOWN_ERROR; break; } return rc; } #if SUPPORT_NAGIOS static int nagios2uniform_rc(const char *action, int rc) { if (rc < 0) { return PCMK_OCF_UNKNOWN_ERROR; } switch (rc) { case NAGIOS_STATE_OK: return PCMK_OCF_OK; case NAGIOS_INSUFFICIENT_PRIV: return PCMK_OCF_INSUFFICIENT_PRIV; case NAGIOS_NOT_INSTALLED: return PCMK_OCF_NOT_INSTALLED; case NAGIOS_STATE_WARNING: case NAGIOS_STATE_CRITICAL: case NAGIOS_STATE_UNKNOWN: case NAGIOS_STATE_DEPENDENT: default: return PCMK_OCF_UNKNOWN_ERROR; } return PCMK_OCF_UNKNOWN_ERROR; } #endif static int get_uniform_rc(const char *standard, const char *action, int rc) { if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_OCF)) { return ocf2uniform_rc(rc); } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_STONITH)) { return stonith2uniform_rc(action, rc); } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD)) { return rc; } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_UPSTART)) { return rc; #if SUPPORT_NAGIOS } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_NAGIOS)) { return nagios2uniform_rc(action, rc); #endif } else { return services_get_ocf_exitcode(action, rc); } } static int action_get_uniform_rc(svc_action_t * action) { lrmd_cmd_t *cmd = action->cb_data; return get_uniform_rc(action->standard, cmd->action, action->rc); } struct notify_new_client_data { xmlNode *notify; pcmk__client_t *new_client; }; static void notify_one_client(gpointer key, gpointer value, gpointer user_data) { pcmk__client_t *client = value; struct notify_new_client_data *data = user_data; if (safe_str_neq(client->id, data->new_client->id)) { send_client_notify(key, (gpointer) client, (gpointer) data->notify); } } void notify_of_new_client(pcmk__client_t *new_client) { struct notify_new_client_data data; data.new_client = new_client; data.notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(data.notify, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data.notify, F_LRMD_OPERATION, LRMD_OP_NEW_CLIENT); pcmk__foreach_ipc_client(notify_one_client, &data); free_xml(data.notify); } static char * parse_exit_reason(const char *output) { const char *cur = NULL; const 
char *last = NULL; static int cookie_len = 0; char *eol = NULL; size_t reason_len = EXIT_REASON_MAX_LEN; if (output == NULL) { return NULL; } if (!cookie_len) { cookie_len = strlen(PCMK_OCF_REASON_PREFIX); } cur = strstr(output, PCMK_OCF_REASON_PREFIX); for (; cur != NULL; cur = strstr(cur, PCMK_OCF_REASON_PREFIX)) { /* skip over the cookie delimiter string */ cur += cookie_len; last = cur; } if (last == NULL) { return NULL; } // Truncate everything after a new line, and limit reason string size eol = strchr(last, '\n'); if (eol) { reason_len = QB_MIN(reason_len, eol - last); } return strndup(last, reason_len); } void client_disconnect_cleanup(const char *client_id) { GHashTableIter iter; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (rsc->call_opts & lrmd_opt_drop_recurring) { /* This client is disconnecting, drop any recurring operations * it may have initiated on the resource */ cancel_all_recurring(rsc, client_id); } } } static void action_complete(svc_action_t * action) { lrmd_rsc_t *rsc; lrmd_cmd_t *cmd = action->cb_data; const char *rclass = NULL; #ifdef PCMK__TIME_USE_CGT bool goagain = false; #endif if (!cmd) { crm_err("Completed executor action (%s) does not match any known operations", action->id); return; } #ifdef PCMK__TIME_USE_CGT if (cmd->exec_rc != action->rc) { cmd->epoch_rcchange = time(NULL); } #endif cmd->last_pid = action->pid; cmd->exec_rc = action_get_uniform_rc(action); cmd->lrmd_op_status = action->status; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc && safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_SERVICE)) { rclass = resources_find_service_class(rsc->type); } else if(rsc) { rclass = rsc->class; } #ifdef PCMK__TIME_USE_CGT if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD)) { if ((cmd->exec_rc == PCMK_OCF_OK) && (safe_str_eq(cmd->action, "start") || safe_str_eq(cmd->action, "stop"))) { /* systemd returns from start and stop actions after the action * begins, not after it completes. We have to jump through a few * hoops so that we don't report 'complete' to the rest of pacemaker * until it's actually done. 
*/ goagain = true; cmd->real_action = cmd->action; cmd->action = strdup("monitor"); } else if (cmd->real_action != NULL) { // This is follow-up monitor to check whether start/stop completed if ((cmd->lrmd_op_status == PCMK_LRM_OP_DONE) && (cmd->exec_rc == PCMK_OCF_PENDING)) { goagain = true; } else if ((cmd->exec_rc == PCMK_OCF_OK) && safe_str_eq(cmd->real_action, "stop")) { goagain = true; } else { int time_sum = time_diff_ms(NULL, &(cmd->t_first_run)); int timeout_left = cmd->timeout_orig - time_sum; crm_debug("%s systemd %s is now complete (elapsed=%dms, " "remaining=%dms): %s (%d)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc); cmd_original_times(cmd); // Monitors may return "not running", but start/stop shouldn't if ((cmd->lrmd_op_status == PCMK_LRM_OP_DONE) && (cmd->exec_rc == PCMK_OCF_NOT_RUNNING)) { if (safe_str_eq(cmd->real_action, "start")) { cmd->exec_rc = PCMK_OCF_UNKNOWN_ERROR; } else if (safe_str_eq(cmd->real_action, "stop")) { cmd->exec_rc = PCMK_OCF_OK; } } } } } #endif #if SUPPORT_NAGIOS if (rsc && safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS)) { if (safe_str_eq(cmd->action, "monitor") && (cmd->interval_ms == 0) && cmd->exec_rc == PCMK_OCF_OK) { /* Successfully executed --version for the nagios plugin */ cmd->exec_rc = PCMK_OCF_NOT_RUNNING; } else if (safe_str_eq(cmd->action, "start") && cmd->exec_rc != PCMK_OCF_OK) { #ifdef PCMK__TIME_USE_CGT goagain = true; #endif } } #endif #ifdef PCMK__TIME_USE_CGT if (goagain) { int time_sum = time_diff_ms(NULL, &(cmd->t_first_run)); int timeout_left = cmd->timeout_orig - time_sum; int delay = cmd->timeout_orig / 10; if(delay >= timeout_left && timeout_left > 20) { delay = timeout_left/2; } delay = QB_MIN(2000, delay); if (delay < timeout_left) { cmd->start_delay = delay; cmd->timeout = timeout_left; if(cmd->exec_rc == PCMK_OCF_OK) { crm_debug("%s %s may still be in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, delay); } else if(cmd->exec_rc == PCMK_OCF_PENDING) { crm_info("%s %s is still in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, time_sum, timeout_left, delay); } else { crm_notice("%s %s failed '%s' (%d): re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc, time_sum, timeout_left, delay); } cmd_reset(cmd); if(rsc) { rsc->active = NULL; } schedule_lrmd_cmd(rsc, cmd); /* Don't finalize cmd, we're not done with it yet */ return; } else { crm_notice("Giving up on %s %s (rc=%d): timeout (elapsed=%dms, remaining=%dms)", cmd->rsc_id, cmd->real_action?cmd->real_action:cmd->action, cmd->exec_rc, time_sum, timeout_left); cmd->lrmd_op_status = PCMK_LRM_OP_TIMEOUT; cmd->exec_rc = PCMK_OCF_TIMEOUT; cmd_original_times(cmd); } } #endif if (action->stderr_data) { cmd->output = strdup(action->stderr_data); cmd->exit_reason = parse_exit_reason(action->stderr_data); } else if (action->stdout_data) { cmd->output = strdup(action->stdout_data); } cmd_finalize(cmd, rsc); } /*! * \internal * \brief Determine operation status of a stonith operation * * Non-stonith resource operations get their operation status directly from the * service library, but the fencer does not have an equivalent, so we must infer * an operation status from the fencer API's return code. 
* * \param[in] action Name of action performed on stonith resource * \param[in] interval_ms Action interval * \param[in] rc Action result from fencer * * \return Operation status corresponding to fencer API return code */ static int stonith_rc2status(const char *action, guint interval_ms, int rc) { int status = PCMK_LRM_OP_DONE; switch (rc) { case pcmk_ok: break; case -EOPNOTSUPP: case -EPROTONOSUPPORT: status = PCMK_LRM_OP_NOTSUPPORTED; break; case -ETIME: case -ETIMEDOUT: status = PCMK_LRM_OP_TIMEOUT; break; case -ENOTCONN: case -ECOMM: // Couldn't talk to fencer status = PCMK_LRM_OP_ERROR; break; case -ENODEV: // The device is not registered with the fencer status = PCMK_LRM_OP_ERROR; break; default: break; } return status; } static void stonith_action_complete(lrmd_cmd_t * cmd, int rc) { // This can be NULL if resource was removed before command completed lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); cmd->exec_rc = stonith2uniform_rc(cmd->action, rc); /* This function may be called with status already set to cancelled, if a * pending action was aborted. Otherwise, we need to determine status from * the fencer return code. */ if (cmd->lrmd_op_status != PCMK_LRM_OP_CANCELLED) { cmd->lrmd_op_status = stonith_rc2status(cmd->action, cmd->interval_ms, rc); // Certain successful actions change the known state of the resource if (rsc && (cmd->exec_rc == PCMK_OCF_OK)) { if (safe_str_eq(cmd->action, "start")) { rsc->st_probe_rc = pcmk_ok; // maps to PCMK_OCF_OK } else if (safe_str_eq(cmd->action, "stop")) { rsc->st_probe_rc = -ENODEV; // maps to PCMK_OCF_NOT_RUNNING } } } /* The recurring timer should not be running at this point in any case, but * as a failsafe, stop it if it is. */ stop_recurring_timer(cmd); /* Reschedule this command if appropriate. If a recurring command is *not* * rescheduled, its status must be PCMK_LRM_OP_CANCELLED, otherwise it will * not be removed from recurring_ops by cmd_finalize(). */ if (rsc && (cmd->interval_ms > 0) && (cmd->lrmd_op_status != PCMK_LRM_OP_CANCELLED)) { start_recurring_timer(cmd); } cmd_finalize(cmd, rsc); } static void lrmd_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { stonith_action_complete(data->userdata, data->rc); } void stonith_connection_failed(void) { GHashTableIter iter; GList *cmd_list = NULL; GList *cmd_iter = NULL; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { /* If we registered this fence device, we don't know whether the * fencer still has the registration or not. Cause future probes to * return PCMK_OCF_UNKNOWN_ERROR until the resource is stopped or * started successfully. This is especially important if the * controller also went away (possibly due to a cluster layer * restart) and won't receive our client notification of any * monitors finalized below. 
*/ if (rsc->st_probe_rc == pcmk_ok) { rsc->st_probe_rc = pcmk_err_generic; } if (rsc->active) { cmd_list = g_list_append(cmd_list, rsc->active); } if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, rsc->recurring_ops); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, rsc->pending_ops); } rsc->pending_ops = rsc->recurring_ops = NULL; } } if (!cmd_list) { return; } crm_err("Connection to fencer failed, finalizing %d pending operations", g_list_length(cmd_list)); for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { stonith_action_complete(cmd_iter->data, -ENOTCONN); } g_list_free(cmd_list); } /*! * \internal * \brief Execute a stonith resource "start" action * * Start a stonith resource by registering it with the fencer. * (Stonith agents don't have a start command.) * * \param[in] stonith_api Connection to fencer * \param[in] rsc Stonith resource to start * \param[in] cmd Start command to execute * * \return pcmk_ok on success, -errno otherwise */ static int execd_stonith_start(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { char *key = NULL; char *value = NULL; stonith_key_value_t *device_params = NULL; int rc = pcmk_ok; // Convert command parameters to stonith API key/values if (cmd->params) { GHashTableIter iter; g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { device_params = stonith_key_value_add(device_params, key, value); } } /* The fencer will automatically register devices via CIB notifications * when the CIB changes, but to avoid a possible race condition between * the fencer receiving the notification and the executor requesting that * resource, the executor registers the device as well. The fencer knows how * to handle duplicate registrations. */ rc = stonith_api->cmds->register_device(stonith_api, st_opt_sync_call, cmd->rsc_id, rsc->provider, rsc->type, device_params); stonith_key_value_freeall(device_params, 1, 1); return rc; } /*! * \internal * \brief Execute a stonith resource "stop" action * * Stop a stonith resource by unregistering it with the fencer. * (Stonith agents don't have a stop command.) * * \param[in] stonith_api Connection to fencer * \param[in] rsc Stonith resource to stop * * \return pcmk_ok on success, -errno otherwise */ static inline int execd_stonith_stop(stonith_t *stonith_api, const lrmd_rsc_t *rsc) { /* @TODO Failure would indicate a problem communicating with fencer; * perhaps we should try reconnecting and retrying a few times? */ return stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call, rsc->rsc_id); } /*! 
* \internal * \brief Initiate a stonith resource agent recurring "monitor" action * * \param[in] stonith_api Connection to fencer * \param[in] rsc Stonith resource to monitor * \param[in] cmd Monitor command being executed * * \return pcmk_ok if monitor was successfully initiated, -errno otherwise */ static inline int execd_stonith_monitor(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { int rc = stonith_api->cmds->monitor(stonith_api, 0, cmd->rsc_id, cmd->timeout / 1000); rc = stonith_api->cmds->register_callback(stonith_api, rc, 0, 0, cmd, "lrmd_stonith_callback", lrmd_stonith_callback); if (rc == TRUE) { rsc->active = cmd; rc = pcmk_ok; } else { rc = -pcmk_err_generic; } return rc; } static void lrmd_rsc_execute_stonith(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { int rc = 0; bool do_monitor = FALSE; stonith_t *stonith_api = get_stonith_connection(); if (!stonith_api) { rc = -ENOTCONN; } else if (safe_str_eq(cmd->action, "start")) { rc = execd_stonith_start(stonith_api, rsc, cmd); if (rc == 0) { do_monitor = TRUE; } } else if (safe_str_eq(cmd->action, "stop")) { rc = execd_stonith_stop(stonith_api, rsc); } else if (safe_str_eq(cmd->action, "monitor")) { if (cmd->interval_ms > 0) { do_monitor = TRUE; } else { rc = rsc->st_probe_rc; } } if (do_monitor) { rc = execd_stonith_monitor(stonith_api, rsc, cmd); if (rc == pcmk_ok) { // Don't clean up yet, we will find out result of the monitor later return; } } stonith_action_complete(cmd, rc); } static int lrmd_rsc_execute_service_lib(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { svc_action_t *action = NULL; GHashTable *params_copy = NULL; CRM_ASSERT(rsc); CRM_ASSERT(cmd); crm_trace("Creating action, resource:%s action:%s class:%s provider:%s agent:%s", rsc->rsc_id, cmd->action, rsc->class, rsc->provider, rsc->type); #if SUPPORT_NAGIOS /* Recurring operations are cancelled anyway for a stop operation */ if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS) && safe_str_eq(cmd->action, "stop")) { cmd->exec_rc = PCMK_OCF_OK; goto exec_done; } #endif params_copy = crm_str_table_dup(cmd->params); action = resources_action_create(rsc->rsc_id, rsc->class, rsc->provider, rsc->type, normalize_action_name(rsc, cmd->action), cmd->interval_ms, cmd->timeout, params_copy, cmd->service_flags); if (!action) { crm_err("Failed to create action, action:%s on resource %s", cmd->action, rsc->rsc_id); cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; goto exec_done; } action->cb_data = cmd; /* 'cmd' may not be valid after this point if * services_action_async() returned TRUE * * Upstart and systemd both synchronously determine monitor/status * results and call action_complete (which may free 'cmd') if necessary. 
*/ if (services_action_async(action, action_complete)) { return TRUE; } cmd->exec_rc = action->rc; if(action->status != PCMK_LRM_OP_DONE) { cmd->lrmd_op_status = action->status; } else { cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; } services_action_free(action); action = NULL; exec_done: cmd_finalize(cmd, rsc); return TRUE; } static gboolean lrmd_rsc_execute(lrmd_rsc_t * rsc) { lrmd_cmd_t *cmd = NULL; CRM_CHECK(rsc != NULL, return FALSE); if (rsc->active) { crm_trace("%s is still active", rsc->rsc_id); return TRUE; } if (rsc->pending_ops) { GList *first = rsc->pending_ops; cmd = first->data; if (cmd->delay_id) { crm_trace ("Command %s %s was asked to run too early, waiting for start_delay timeout of %dms", cmd->rsc_id, cmd->action, cmd->start_delay); return TRUE; } rsc->pending_ops = g_list_remove_link(rsc->pending_ops, first); g_list_free_1(first); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_run), &(cmd->t_first_run)); #endif cmd->epoch_last_run = time(NULL); } if (!cmd) { crm_trace("Nothing further to do for %s", rsc->rsc_id); return TRUE; } rsc->active = cmd; /* only one op at a time for a rsc */ if (cmd->interval_ms) { rsc->recurring_ops = g_list_append(rsc->recurring_ops, cmd); } log_execute(cmd); if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { lrmd_rsc_execute_stonith(rsc, cmd); } else { lrmd_rsc_execute_service_lib(rsc, cmd); } return TRUE; } static gboolean lrmd_rsc_dispatch(gpointer user_data) { return lrmd_rsc_execute(user_data); } void free_rsc(gpointer data) { GListPtr gIter = NULL; lrmd_rsc_t *rsc = data; int is_stonith = safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH); gIter = rsc->pending_ops; while (gIter != NULL) { GListPtr next = gIter->next; lrmd_cmd_t *cmd = gIter->data; /* command was never executed */ cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; cmd_finalize(cmd, NULL); gIter = next; } /* frees list, but not list elements. */ g_list_free(rsc->pending_ops); gIter = rsc->recurring_ops; while (gIter != NULL) { GListPtr next = gIter->next; lrmd_cmd_t *cmd = gIter->data; if (is_stonith) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; /* If a stonith command is in-flight, just mark it as cancelled; * it is not safe to finalize/free the cmd until the stonith api * says it has either completed or timed out. */ if (rsc->active != cmd) { cmd_finalize(cmd, NULL); } } else { /* This command is already handed off to service library, * let service library cancel it and tell us via the callback * when it is cancelled. The rsc can be safely destroyed * even if we are waiting for the cancel result */ services_action_cancel(rsc->rsc_id, normalize_action_name(rsc, cmd->action), cmd->interval_ms); } gIter = next; } /* frees list, but not list elements. 
*/ g_list_free(rsc->recurring_ops); free(rsc->rsc_id); free(rsc->class); free(rsc->provider); free(rsc->type); mainloop_destroy_trigger(rsc->work); free(rsc); } static xmlNode * process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id) { xmlNode *reply = NULL; int rc = pcmk_ok; const char *is_ipc_provider = crm_element_value(request, F_LRMD_IS_IPC_PROVIDER); const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION); if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) { crm_err("Cluster API version must be greater than or equal to %s, not %s", LRMD_MIN_PROTOCOL_VERSION, protocol_version); rc = -EPROTO; } reply = create_lrmd_reply(__FUNCTION__, rc, call_id); crm_xml_add(reply, F_LRMD_OPERATION, CRM_OP_REGISTER); crm_xml_add(reply, F_LRMD_CLIENTID, client->id); crm_xml_add(reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); if (crm_is_true(is_ipc_provider)) { // This is a remote connection from a cluster node's controller #ifdef SUPPORT_REMOTE ipc_proxy_add_provider(client); #endif } return reply; } static int process_lrmd_rsc_register(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = build_rsc_from_xml(request); lrmd_rsc_t *dup = g_hash_table_lookup(rsc_list, rsc->rsc_id); if (dup && safe_str_eq(rsc->class, dup->class) && safe_str_eq(rsc->provider, dup->provider) && safe_str_eq(rsc->type, dup->type)) { crm_notice("Ignoring duplicate registration of '%s'", rsc->rsc_id); free_rsc(rsc); return rc; } g_hash_table_replace(rsc_list, rsc->rsc_id, rsc); crm_info("Cached agent information for '%s'", rsc->rsc_id); return rc; } static xmlNode * process_lrmd_get_rsc_info(xmlNode *request, int call_id) { int rc = pcmk_ok; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); xmlNode *reply = NULL; lrmd_rsc_t *rsc = NULL; if (rsc_id == NULL) { rc = -ENODEV; } else { rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Agent information for '%s' not in cache", rsc_id); rc = -ENODEV; } } reply = create_lrmd_reply(__FUNCTION__, rc, call_id); if (rsc) { crm_xml_add(reply, F_LRMD_RSC_ID, rsc->rsc_id); crm_xml_add(reply, F_LRMD_CLASS, rsc->class); crm_xml_add(reply, F_LRMD_PROVIDER, rsc->provider); crm_xml_add(reply, F_LRMD_TYPE, rsc->type); } return reply; } static int process_lrmd_rsc_unregister(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); if (!rsc_id) { return -ENODEV; } rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Ignoring unregistration of resource '%s', which is not registered", rsc_id); return pcmk_ok; } if (rsc->active) { /* let the caller know there are still active ops on this rsc to watch for */ crm_trace("Operation (0x%p) still in progress for unregistered resource %s", rsc->active, rsc_id); rc = -EINPROGRESS; } g_hash_table_remove(rsc_list, rsc_id); return rc; } static int process_lrmd_rsc_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) { lrmd_rsc_t *rsc = NULL; lrmd_cmd_t *cmd = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); int call_id; if (!rsc_id) { return -EINVAL; } if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) { crm_info("Resource '%s' not found (%d active 
resources)", rsc_id, g_hash_table_size(rsc_list)); return -ENODEV; } cmd = create_lrmd_cmd(request, client); call_id = cmd->call_id; /* Don't reference cmd after handing it off to be scheduled. * The cmd could get merged and freed. */ schedule_lrmd_cmd(rsc, cmd); return call_id; } static int cancel_op(const char *rsc_id, const char *action, guint interval_ms) { GListPtr gIter = NULL; lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, rsc_id); /* How to cancel an action. * 1. Check pending ops list, if it hasn't been handed off * to the service library or stonith recurring list remove * it there and that will stop it. * 2. If it isn't in the pending ops list, then it's either a * recurring op in the stonith recurring list, or the service * library's recurring list. Stop it there * 3. If not found in any lists, then this operation has either * been executed already and is not a recurring operation, or * never existed. */ if (!rsc) { return -ENODEV; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (safe_str_eq(cmd->action, action) && (cmd->interval_ms == interval_ms)) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; cmd_finalize(cmd, rsc); return pcmk_ok; } } if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { /* The service library does not handle stonith operations. * We have to handle recurring stonith operations ourselves. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (safe_str_eq(cmd->action, action) && (cmd->interval_ms == interval_ms)) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; if (rsc->active != cmd) { cmd_finalize(cmd, rsc); } return pcmk_ok; } } } else if (services_action_cancel(rsc_id, normalize_action_name(rsc, action), interval_ms) == TRUE) { /* The service library will tell the action_complete callback function * this action was cancelled, which will destroy the cmd and remove * it from the recurring_op list. Do not do that in this function * if the service library says it cancelled it. */ return pcmk_ok; } return -EOPNOTSUPP; } static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id) { GList *cmd_list = NULL; GList *cmd_iter = NULL; /* Notice a copy of each list is created when concat is called. * This prevents odd behavior from occurring when the cmd_list * is iterated through later on. It is possible the cancel_op * function may end up modifying the recurring_ops and pending_ops * lists. 
If we did not copy those lists, our cmd_list iteration * could get messed up.*/ if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->recurring_ops)); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->pending_ops)); } if (!cmd_list) { return; } for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { lrmd_cmd_t *cmd = cmd_iter->data; if (cmd->interval_ms == 0) { continue; } if (client_id && safe_str_neq(cmd->client_id, client_id)) { continue; } cancel_op(rsc->rsc_id, cmd->action, cmd->interval_ms); } /* frees only the copied list data, not the cmds */ g_list_free(cmd_list); } static int process_lrmd_rsc_cancel(pcmk__client_t *client, uint32_t id, xmlNode *request) { xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); const char *action = crm_element_value(rsc_xml, F_LRMD_RSC_ACTION); guint interval_ms = 0; crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &interval_ms); if (!rsc_id || !action) { return -EINVAL; } return cancel_op(rsc_id, action, interval_ms); } static void add_recurring_op_xml(xmlNode *reply, lrmd_rsc_t *rsc) { xmlNode *rsc_xml = create_xml_node(reply, F_LRMD_RSC); crm_xml_add(rsc_xml, F_LRMD_RSC_ID, rsc->rsc_id); for (GList *item = rsc->recurring_ops; item != NULL; item = item->next) { lrmd_cmd_t *cmd = item->data; xmlNode *op_xml = create_xml_node(rsc_xml, T_LRMD_RSC_OP); crm_xml_add(op_xml, F_LRMD_RSC_ACTION, (cmd->real_action? cmd->real_action : cmd->action)); crm_xml_add_ms(op_xml, F_LRMD_RSC_INTERVAL, cmd->interval_ms); crm_xml_add_int(op_xml, F_LRMD_TIMEOUT, cmd->timeout_orig); } } static xmlNode * process_lrmd_get_recurring(xmlNode *request, int call_id) { int rc = pcmk_ok; const char *rsc_id = NULL; lrmd_rsc_t *rsc = NULL; xmlNode *reply = NULL; xmlNode *rsc_xml = NULL; // Resource ID is optional rsc_xml = first_named_child(request, F_LRMD_CALLDATA); if (rsc_xml) { rsc_xml = first_named_child(rsc_xml, F_LRMD_RSC); } if (rsc_xml) { rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); } // If resource ID is specified, resource must exist if (rsc_id != NULL) { rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); rc = -ENODEV; } } reply = create_lrmd_reply(__FUNCTION__, rc, call_id); // If resource ID is not specified, check all resources if (rsc_id == NULL) { GHashTableIter iter; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rsc)) { add_recurring_op_xml(reply, rsc); } } else if (rsc) { add_recurring_op_xml(reply, rsc); } return reply; } void process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; int call_id = 0; const char *op = crm_element_value(request, F_LRMD_OPERATION); int do_reply = 0; int do_notify = 0; xmlNode *reply = NULL; crm_trace("Processing %s operation from %s", op, client->id); crm_element_value_int(request, F_LRMD_CALLID, &call_id); if (crm_str_eq(op, CRM_OP_IPC_FWD, TRUE)) { #ifdef SUPPORT_REMOTE ipc_proxy_forward_client(client, request); #endif do_reply = 1; } else if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { reply = process_lrmd_signon(client, request, call_id); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_REG, TRUE)) { rc = process_lrmd_rsc_register(client, id, request); do_notify = 1; do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_INFO, TRUE)) { reply = 
process_lrmd_get_rsc_info(request, call_id); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_UNREG, TRUE)) { rc = process_lrmd_rsc_unregister(client, id, request); /* don't notify anyone about failed un-registers */ if (rc == pcmk_ok || rc == -EINPROGRESS) { do_notify = 1; } do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_EXEC, TRUE)) { rc = process_lrmd_rsc_exec(client, id, request); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_CANCEL, TRUE)) { rc = process_lrmd_rsc_cancel(client, id, request); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_POKE, TRUE)) { do_notify = 1; do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_CHECK, TRUE)) { xmlNode *data = get_message_xml(request, F_LRMD_CALLDATA); const char *timeout = crm_element_value(data, F_LRMD_WATCHDOG); CRM_LOG_ASSERT(data != NULL); pcmk__valid_sbd_timeout(timeout); } else if (crm_str_eq(op, LRMD_OP_ALERT_EXEC, TRUE)) { rc = process_lrmd_alert_exec(client, id, request); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_GET_RECURRING, TRUE)) { reply = process_lrmd_get_recurring(request, call_id); do_reply = 1; } else { rc = -EOPNOTSUPP; do_reply = 1; crm_err("Unknown IPC request '%s' from %s", op, client->name); } crm_debug("Processed %s operation from %s: rc=%d, reply=%d, notify=%d", op, client->id, rc, do_reply, do_notify); if (do_reply) { int send_rc = pcmk_rc_ok; if (reply == NULL) { reply = create_lrmd_reply(__FUNCTION__, rc, call_id); } send_rc = lrmd_server_send_reply(client, id, reply); free_xml(reply); if (send_rc != pcmk_rc_ok) { crm_warn("Reply to client %s failed: %s " CRM_XS " %d", client->name, pcmk_rc_str(send_rc), send_rc); } } if (do_notify) { send_generic_notify(rc, request); } } diff --git a/daemons/fenced/Makefile.am b/daemons/fenced/Makefile.am index a962fb449b..43413e11db 100644 --- a/daemons/fenced/Makefile.am +++ b/daemons/fenced/Makefile.am @@ -1,54 +1,55 @@ # # Original Author: Sun Jiang Dong # Copyright 2004 International Business Machines # # with later changes copyright 2004-2019 the Pacemaker project contributors. # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. 
# include $(top_srcdir)/mk/common.mk halibdir = $(CRM_DAEMON_DIR) halib_PROGRAMS = pacemaker-fenced cts-fence-helper sbin_SCRIPTS = fence_legacy noinst_HEADERS = pacemaker-fenced.h if BUILD_XML_HELP man7_MANS = pacemaker-fenced.7 endif EXTRA_DIST = README.md cts_fence_helper_SOURCES = cts-fence-helper.c cts_fence_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/fencing/libstonithd.la pacemaker_fenced_YFLAGS = -d pacemaker_fenced_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_fenced_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_fenced_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/pacemaker/libpacemaker.la \ $(CLUSTERLIBS) pacemaker_fenced_SOURCES = pacemaker-fenced.c \ fenced_commands.c \ fenced_remote.c \ fenced_history.c CLEANFILES = $(man7_MANS) $(man8_MANS) if BUILD_LEGACY_LINKS install-exec-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f stonithd && $(LN_S) pacemaker-fenced stonithd uninstall-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f stonithd endif diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c index 445cfd216d..710d6fe50f 100644 --- a/daemons/fenced/fenced_history.c +++ b/daemons/fenced/fenced_history.c @@ -1,489 +1,490 @@ /* * Copyright 2009-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_STONITH_HISTORY 500 /*! * \internal * \brief Send a broadcast to all nodes to trigger cleanup or * history synchronisation * * \param[in] history Optional history to be attached * \param[in] callopts We control cleanup via a flag in the callopts * \param[in] target Cleanup can be limited to certain fence-targets */ static void stonith_send_broadcast_history(xmlNode *history, int callopts, const char *target) { xmlNode *bcast = create_xml_node(NULL, "stonith_command"); xmlNode *data = create_xml_node(NULL, __FUNCTION__); if (target) { crm_xml_add(data, F_STONITH_TARGET, target); } crm_xml_add(bcast, F_TYPE, T_STONITH_NG); crm_xml_add(bcast, F_SUBTYPE, "broadcast"); crm_xml_add(bcast, F_STONITH_OPERATION, STONITH_OP_FENCE_HISTORY); crm_xml_add_int(bcast, F_STONITH_CALLOPTS, callopts); if (history) { add_node_copy(data, history); } add_message_xml(bcast, F_STONITH_CALLDATA, data); send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE); free_xml(data); free_xml(bcast); } static gboolean stonith_remove_history_entry (gpointer key, gpointer value, gpointer user_data) { remote_fencing_op_t *op = value; const char *target = (const char *) user_data; if ((op->state == st_failed) || (op->state == st_done)) { if ((target) && (strcmp(op->target, target) != 0)) { return FALSE; } return TRUE; } return FALSE; /* don't clean pending operations */ } /*! 
* \internal * \brief Send out a cleanup broadcast or do a local history-cleanup * * \param[in] target Cleanup can be limited to certain fence-targets * \param[in] broadcast Send out a cleanup broadcast */ static void stonith_fence_history_cleanup(const char *target, gboolean broadcast) { if (broadcast) { stonith_send_broadcast_history(NULL, st_opt_cleanup | st_opt_discard_reply, target); /* we'll do the local clean when we receive back our own broadcast */ } else if (stonith_remote_op_list) { g_hash_table_foreach_remove(stonith_remote_op_list, stonith_remove_history_entry, (gpointer) target); do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL); } } /* keeping the length of fence-history within bounds * ================================================= * * If things are really running wild, a lot of fencing attempts * might fill up the hash table, eventually using up a lot * of memory and creating huge history-sync messages. * Before the history was synced across nodes, at least a * reboot of a cluster node kept the history within bounds, * although not in a reliable manner. * * stonith_remote_op_list isn't sorted by time-stamp, * so it would be rather expensive to delete e.g. * the oldest entry whenever it grows past MAX_STONITH_HISTORY * entries. * It is more efficient to purge MAX_STONITH_HISTORY/2 * entries whenever the list grows beyond MAX_STONITH_HISTORY * (sort by age + purge the MAX_STONITH_HISTORY/2 oldest). * Doing that on a per-node basis might raise the * probability of large syncs occurring. * Things like introducing a broadcast to purge * MAX_STONITH_HISTORY/2 entries, or not syncing above a certain * threshold, come to mind ... * The simplest approach, though, is to purge the full history * throughout the cluster once MAX_STONITH_HISTORY is reached. * On the other hand, this purges the history in * situations where it would probably be handy to have it. */ static int op_time_sort(const void *a_voidp, const void *b_voidp) { const remote_fencing_op_t **a = (const remote_fencing_op_t **) a_voidp; const remote_fencing_op_t **b = (const remote_fencing_op_t **) b_voidp; gboolean a_pending = ((*a)->state != st_failed) && ((*a)->state != st_done); gboolean b_pending = ((*b)->state != st_failed) && ((*b)->state != st_done); if (a_pending && b_pending) { return 0; } else if (a_pending) { return -1; } else if (b_pending) { return 1; } else if ((*b)->completed == (*a)->completed) { return 0; } else if ((*b)->completed > (*a)->completed) { return 1; } return -1; } /*!
* \internal * \brief Trim the local history to MAX_STONITH_HISTORY / 2 entries * once it grows beyond MAX_STONITH_HISTORY */ void stonith_fence_history_trim(void) { guint num_ops; if (!stonith_remote_op_list) { return; } num_ops = g_hash_table_size(stonith_remote_op_list); if (num_ops > MAX_STONITH_HISTORY) { remote_fencing_op_t *ops[num_ops]; remote_fencing_op_t *op = NULL; GHashTableIter iter; int i; crm_trace("Fencing History growing beyond limit of %d so purge " "half of failed/successful attempts", MAX_STONITH_HISTORY); /* write all ops into an array */ i = 0; g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) { ops[i++] = op; } /* run quicksort over the array so that we get pending ops * first and then sorted most recent to oldest */ qsort(ops, num_ops, sizeof(remote_fencing_op_t *), op_time_sort); /* purge the oldest half of the history entries */ for (i = MAX_STONITH_HISTORY / 2; i < num_ops; i++) { /* keep pending ops even if they shouldn't fill more than * half of our buffer */ if ((ops[i]->state == st_failed) || (ops[i]->state == st_done)) { g_hash_table_remove(stonith_remote_op_list, ops[i]->id); } } /* we've just purged valid data from the list so there is no need * to create a notification - if displayed it can stay */ } } /*! * \internal * \brief Convert xml fence-history to a hash-table like stonith_remote_op_list * * \param[in] history Fence-history in xml * * \return Fence-history as hash-table */ static GHashTable * stonith_xml_history_to_list(xmlNode *history) { xmlNode *xml_op = NULL; GHashTable *rv = NULL; init_stonith_remote_op_hash_table(&rv); CRM_LOG_ASSERT(rv != NULL); for (xml_op = __xml_first_child(history); xml_op != NULL; xml_op = __xml_next(xml_op)) { remote_fencing_op_t *op = NULL; char *id = crm_element_value_copy(xml_op, F_STONITH_REMOTE_OP_ID); - int completed, state; + int state; + long long completed; if (!id) { crm_warn("History to convert to hashtable has no id in entry"); continue; } crm_trace("Attaching op %s to hashtable", id); op = calloc(1, sizeof(remote_fencing_op_t)); op->id = id; op->target = crm_element_value_copy(xml_op, F_STONITH_TARGET); op->action = crm_element_value_copy(xml_op, F_STONITH_ACTION); op->originator = crm_element_value_copy(xml_op, F_STONITH_ORIGIN); op->delegate = crm_element_value_copy(xml_op, F_STONITH_DELEGATE); op->client_name = crm_element_value_copy(xml_op, F_STONITH_CLIENTNAME); - crm_element_value_int(xml_op, F_STONITH_DATE, &completed); + crm_element_value_ll(xml_op, F_STONITH_DATE, &completed); op->completed = (time_t) completed; crm_element_value_int(xml_op, F_STONITH_STATE, &state); op->state = (enum op_state) state; g_hash_table_replace(rv, id, op); CRM_LOG_ASSERT(g_hash_table_lookup(rv, id) != NULL); } return rv; } /*!
* \internal * \brief Craft xml difference between local fence-history and a history * coming from remote * * \param[in] remote_history Fence-history as hash-table (may be NULL) * \param[in] add_id If crafting the answer for an API * history-request, there is no need for the id * \param[in] target Optionally limit to certain fence-target * * \return The fence-history as xml */ static xmlNode * stonith_local_history_diff(GHashTable *remote_history, gboolean add_id, const char *target) { xmlNode *history = NULL; int cnt = 0; if (stonith_remote_op_list) { GHashTableIter iter; remote_fencing_op_t *op = NULL; history = create_xml_node(NULL, F_STONITH_HISTORY_LIST); g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) { xmlNode *entry = NULL; if (remote_history && g_hash_table_lookup(remote_history, op->id)) { continue; /* skip entries broadcasted already */ } if (target && strcmp(op->target, target) != 0) { continue; } cnt++; crm_trace("Attaching op %s", op->id); entry = create_xml_node(history, STONITH_OP_EXEC); if (add_id) { crm_xml_add(entry, F_STONITH_REMOTE_OP_ID, op->id); } crm_xml_add(entry, F_STONITH_TARGET, op->target); crm_xml_add(entry, F_STONITH_ACTION, op->action); crm_xml_add(entry, F_STONITH_ORIGIN, op->originator); crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate); crm_xml_add(entry, F_STONITH_CLIENTNAME, op->client_name); - crm_xml_add_int(entry, F_STONITH_DATE, op->completed); + crm_xml_add_ll(entry, F_STONITH_DATE, op->completed); crm_xml_add_int(entry, F_STONITH_STATE, op->state); } } if (cnt == 0) { free_xml(history); return NULL; } else { return history; } } /*! * \internal * \brief Merge fence-history coming from remote into local history * * \param[in] history Hash-table holding remote history to be merged in */ static void stonith_merge_in_history_list(GHashTable *history) { GHashTableIter iter; remote_fencing_op_t *op = NULL; gboolean updated = FALSE; if (!history) { return; } init_stonith_remote_op_hash_table(&stonith_remote_op_list); g_hash_table_iter_init(&iter, history); while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) { remote_fencing_op_t *stored_op = g_hash_table_lookup(stonith_remote_op_list, op->id); if (stored_op) { continue; // Skip existing entries (@TODO state-merging might be desirable) } updated = TRUE; g_hash_table_iter_steal(&iter); if ((op->state != st_failed) && (op->state != st_done) && safe_str_eq(op->originator, stonith_our_uname)) { crm_warn("received a pending action we are supposed to be the " "owner of, but it's not in our records -> fail it"); op->state = st_failed; op->completed = time(NULL); /* use -EHOSTUNREACH to not introduce a new return-code that might trigger unexpected results at other places and to prevent remote_op_done from setting the delegate if not present */ stonith_bcast_result_to_peers(op, -EHOSTUNREACH, FALSE); } g_hash_table_insert(stonith_remote_op_list, op->id, op); /* we could trim the history here, but if we bail * out after trimming we might miss more recent entries * of those that might still be in the list; * if we don't bail out, trimming once is more * efficient, and memory overhead is minimal as * we are just moving pointers from one hash to * another */ } stonith_fence_history_trim(); if (updated) { do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL); } g_hash_table_destroy(history); /* remove what is left */ } /*!
* \internal * \brief Handle fence-history messages (either from the API or coming in as * broadcasts) * * \param[in] msg Request message * \param[in] output In case of a request from the API, used to craft * a reply from * \param[in] remote_peer * \param[in] options call-options from the request * * \return Always success, as there is actually nothing that can really go wrong */ int stonith_fence_history(xmlNode *msg, xmlNode **output, const char *remote_peer, int options) { int rc = 0; const char *target = NULL; xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_NEVER); xmlNode *out_history = NULL; if (dev) { target = crm_element_value(dev, F_STONITH_TARGET); if (target && (options & st_opt_cs_nodeid)) { int nodeid = crm_atoi(target, NULL); crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY); if (node) { target = node->uname; } } } if (options & st_opt_cleanup) { crm_trace("Cleaning up operations on %s in %p", target, stonith_remote_op_list); stonith_fence_history_cleanup(target, crm_element_value(msg, F_STONITH_CALLID) != NULL); } else if (options & st_opt_broadcast) { /* there is no clear sign atm for when a history sync is done so send a notification for anything that smells like history-sync */ do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY_SYNCED, 0, NULL); if (crm_element_value(msg, F_STONITH_CALLID)) { /* this is coming from the stonith-API * * craft a broadcast with node's history * so that every node can merge and broadcast * what it has on top */ out_history = stonith_local_history_diff(NULL, TRUE, NULL); crm_trace("Broadcasting history to peers"); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); } else if (remote_peer && !safe_str_eq(remote_peer, stonith_our_uname)) { xmlNode *history = get_xpath_object("//" F_STONITH_HISTORY_LIST, msg, LOG_NEVER); GHashTable *received_history = history?stonith_xml_history_to_list(history):NULL; /* either a broadcast created directly upon stonith-API request * or a diff as response to such a thing * * in both cases it may have a history or not * if we have differential data * merge in what we've received and stop * otherwise broadcast what we have on top * marking as differential and merge in afterwards */ if (!history || !crm_is_true(crm_element_value(history, F_STONITH_DIFFERENTIAL))) { out_history = stonith_local_history_diff(received_history, TRUE, NULL); if (out_history) { crm_trace("Broadcasting history-diff to peers"); crm_xml_add(out_history, F_STONITH_DIFFERENTIAL, XML_BOOLEAN_TRUE); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); } else { crm_trace("History-diff is empty - skip broadcast"); } } stonith_merge_in_history_list(received_history); } else { crm_trace("Skipping history-query-broadcast (%s%s)" " we sent ourselves", remote_peer?"remote-peer=":"local-ipc", remote_peer?remote_peer:""); } } else { /* plain history request */ crm_trace("Looking for operations on %s in %p", target, stonith_remote_op_list); *output = stonith_local_history_diff(NULL, FALSE, target); } free_xml(out_history); return rc; } diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c index 4f1c50ce85..e974642604 100644 --- a/daemons/fenced/pacemaker-fenced.c +++ b/daemons/fenced/pacemaker-fenced.c @@ -1,1538 +1,1529 @@ /* * Copyright 2009-2020 the Pacemaker project contributors * * The version control history for this file may have further details.
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include /* U32T ~ PRIu32, X32T ~ PRIx32 */ #include #include #include #include #include #include #include #include #include #include #include #include #include char *stonith_our_uname = NULL; char *stonith_our_uuid = NULL; long stonith_watchdog_timeout_ms = 0; static GMainLoop *mainloop = NULL; gboolean stand_alone = FALSE; static gboolean no_cib_connect = FALSE; static gboolean stonith_shutdown_flag = FALSE; static qb_ipcs_service_t *ipcs = NULL; static xmlNode *local_cib = NULL; static pe_working_set_t *fenced_data_set = NULL; static cib_t *cib_api = NULL; -static void *cib_library = NULL; static void stonith_shutdown(int nsig); static void stonith_cleanup(void); static int32_t st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { if (stonith_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", pcmk__client_pid(c)); return -EPERM; } if (pcmk__new_client(c, uid, gid) == NULL) { return -EIO; } return 0; } /* Exit code means? */ static int32_t st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; int call_options = 0; xmlNode *request = NULL; pcmk__client_t *c = pcmk__find_client(qbc); const char *op = NULL; if (c == NULL) { crm_info("Invalid client: %p", qbc); return 0; } request = pcmk__client_data2xml(c, data, &id, &flags); if (request == NULL) { pcmk__ipc_send_ack(c, id, flags, "nack"); return 0; } op = crm_element_value(request, F_CRM_TASK); if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) { crm_xml_add(request, F_TYPE, T_STONITH_NG); crm_xml_add(request, F_STONITH_OPERATION, op); crm_xml_add(request, F_STONITH_CLIENTID, c->id); crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c)); crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname); send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE); free_xml(request); return 0; } if (c->name == NULL) { const char *value = crm_element_value(request, F_STONITH_CLIENTNAME); if (value == NULL) { value = "unknown"; } c->name = crm_strdup_printf("%s.%u", value, c->pid); } crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); crm_trace("Flags %" X32T "/%u for command %" U32T " from %s", flags, call_options, id, pcmk__client_name(c)); if (is_set(call_options, st_opt_sync_call)) { CRM_ASSERT(flags & crm_ipc_client_response); CRM_LOG_ASSERT(c->request_id == 0); /* This means the client has two synchronous events in-flight */ c->request_id = id; /* Reply only to the last one */ } crm_xml_add(request, F_STONITH_CLIENTID, c->id); crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c)); crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname); stonith_command(c, id, flags, request, NULL); free_xml(request); return 0; } /* Error code means? 
*/ static int32_t st_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p closed", c); pcmk__free_client(client); /* 0 means: yes, go ahead and destroy the connection */ return 0; } static void st_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p destroyed", c); st_ipc_closed(c); } static void stonith_peer_callback(xmlNode * msg, void *private_data) { const char *remote_peer = crm_element_value(msg, F_ORIG); const char *op = crm_element_value(msg, F_STONITH_OPERATION); if (crm_str_eq(op, "poke", TRUE)) { return; } crm_log_xml_trace(msg, "Peer[inbound]"); stonith_command(NULL, 0, 0, msg, remote_peer); } #if SUPPORT_COROSYNC static void stonith_peer_ais_callback(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { xml = string2xml(data); if (xml == NULL) { crm_err("Invalid XML: '%.120s'", data); free(data); return; } crm_xml_add(xml, F_ORIG, from); /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */ stonith_peer_callback(xml, NULL); } free_xml(xml); free(data); return; } static void stonith_peer_cs_destroy(gpointer user_data) { crm_crit("Lost connection to cluster layer, shutting down"); stonith_shutdown(0); } #endif void do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { /* send callback to originating child */ pcmk__client_t *client_obj = NULL; int local_rc = pcmk_rc_ok; crm_trace("Sending response"); client_obj = pcmk__find_client_by_id(client_id); crm_trace("Sending callback to request originator"); if (client_obj == NULL) { local_rc = EPROTO; crm_trace("No client to sent the response to. F_STONITH_CLIENTID not set."); } else { int rid = 0; if (sync_reply) { CRM_LOG_ASSERT(client_obj->request_id); rid = client_obj->request_id; client_obj->request_id = 0; crm_trace("Sending response %d to %s %s", rid, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } else { crm_trace("Sending an event to %s %s", client_obj->name, from_peer ? "(originator of delegated request)" : ""); } local_rc = pcmk__ipc_send_xml(client_obj, rid, notify_src, (sync_reply? crm_ipc_flags_none : crm_ipc_server_event)); } if ((local_rc != pcmk_rc_ok) && (client_obj != NULL)) { crm_warn("%s reply to %s failed: %s", (sync_reply? "Synchronous" : "Asynchronous"), (client_obj? 
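For reference, the CPG callback above turns the raw cluster payload into XML with string2xml() and stamps the origin onto it with crm_xml_add(). A stand-alone libxml2 sketch of that parse-and-tag step (illustrative only, not the Pacemaker helpers; build with `gcc sketch.c $(xml2-config --cflags --libs)`):

#include <libxml/parser.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *data = "<stonith_command st_op=\"poke\"/>";
    xmlDocPtr doc = xmlReadMemory(data, strlen(data), "peer-msg", NULL, 0);

    if (doc == NULL) {
        fprintf(stderr, "Invalid XML: '%.120s'\n", data);
        return 1;
    }
    xmlNodePtr root = xmlDocGetRootElement(doc);
    xmlChar *op = NULL;

    xmlSetProp(root, BAD_CAST "src", BAD_CAST "node1");   /* like F_ORIG */
    op = xmlGetProp(root, BAD_CAST "st_op");
    printf("operation=%s\n", op ? (const char *) op : "(none)");

    xmlFree(op);
    xmlFreeDoc(doc);
    xmlCleanupParser();
    return 0;
}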
client_obj->name : "unknown client"), pcmk_rc_str(local_rc)); } } long long get_stonith_flag(const char *name) { if (safe_str_eq(name, T_STONITH_NOTIFY_FENCE)) { return st_callback_notify_fence; } else if (safe_str_eq(name, STONITH_OP_DEVICE_ADD)) { return st_callback_device_add; } else if (safe_str_eq(name, STONITH_OP_DEVICE_DEL)) { return st_callback_device_del; } else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY)) { return st_callback_notify_history; } else if (safe_str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED)) { return st_callback_notify_history_synced; } return st_callback_unknown; } static void stonith_notify_client(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; pcmk__client_t *client = value; const char *type = NULL; CRM_CHECK(client != NULL, return); CRM_CHECK(update_msg != NULL, return); type = crm_element_value(update_msg, F_SUBTYPE); CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return); if (client->ipcs == NULL) { crm_trace("Skipping client with NULL channel"); return; } if (client->options & get_stonith_flag(type)) { int rc = pcmk__ipc_send_xml(client, 0, update_msg, crm_ipc_server_event|crm_ipc_server_error); if (rc != pcmk_rc_ok) { crm_warn("%s notification of client %s failed: %s " CRM_XS " id=%.8s rc=%d", type, pcmk__client_name(client), pcmk_rc_str(rc), client->id, rc); } else { crm_trace("Sent %s notification to client %s.%.6s", type, pcmk__client_name(client), client->id); } } } void do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout) { pcmk__client_t *client = NULL; xmlNode *notify_data = NULL; if (!timeout || !call_id || !client_id) { return; } client = pcmk__find_client_by_id(client_id); if (!client) { return; } notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE); crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE); crm_xml_add(notify_data, F_STONITH_CALLID, call_id); crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout); crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id); if (client) { pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event); } free_xml(notify_data); } void do_stonith_notify(int options, const char *type, int result, xmlNode * data) { /* TODO: Standardize the contents of data */ xmlNode *update_msg = create_xml_node(NULL, "notify"); CRM_CHECK(type != NULL,;); crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(update_msg, F_SUBTYPE, type); crm_xml_add(update_msg, F_STONITH_OPERATION, type); crm_xml_add_int(update_msg, F_STONITH_RC, result); if (data != NULL) { add_message_xml(update_msg, F_STONITH_CALLDATA, data); } crm_trace("Notifying clients"); pcmk__foreach_ipc_client(stonith_notify_client, update_msg); free_xml(update_msg); crm_trace("Notify complete"); } static void do_stonith_notify_config(int options, const char *op, int rc, const char *desc, int active) { xmlNode *notify_data = create_xml_node(NULL, op); CRM_CHECK(notify_data != NULL, return); crm_xml_add(notify_data, F_STONITH_DEVICE, desc); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active); do_stonith_notify(options, op, rc, notify_data); free_xml(notify_data); } void do_stonith_notify_device(int options, const char *op, int rc, const char *desc) { do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(device_list)); } void do_stonith_notify_level(int options, const char *op, int rc, const char *desc) { do_stonith_notify_config(options, op, rc, desc, g_hash_table_size(topology)); } static void 
topology_remove_helper(const char *node, int level) { int rc; char *desc = NULL; xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); rc = stonith_level_remove(data, &desc); do_stonith_notify_level(0, STONITH_OP_LEVEL_DEL, rc, desc); free_xml(data); free(desc); } static void remove_cib_device(xmlXPathObjectPtr xpathObj) { int max = numXpathResults(xpathObj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { const char *rsc_id = NULL; const char *standard = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match != NULL) { standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); } if (safe_str_neq(standard, PCMK_RESOURCE_CLASS_STONITH)) { continue; } rsc_id = crm_element_value(match, XML_ATTR_ID); stonith_device_remove(rsc_id, TRUE); } } static void handle_topology_change(xmlNode *match, bool remove) { int rc; char *desc = NULL; CRM_CHECK(match != NULL, return); crm_trace("Updating %s", ID(match)); if(remove) { int index = 0; char *key = stonith_level_key(match, -1); crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); topology_remove_helper(key, index); free(key); } rc = stonith_level_register(match, &desc); do_stonith_notify_level(0, STONITH_OP_LEVEL_ADD, rc, desc); free(desc); } static void remove_fencing_topology(xmlXPathObjectPtr xpathObj) { int max = numXpathResults(xpathObj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if (match && crm_element_value(match, XML_DIFF_MARKER)) { /* Deletion */ int index = 0; char *target = stonith_level_key(match, -1); crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); if (target == NULL) { crm_err("Invalid fencing target in element %s", ID(match)); } else if (index <= 0) { crm_err("Invalid level for %s in element %s", target, ID(match)); } else { topology_remove_helper(target, index); } /* } else { Deal with modifications during the 'addition' stage */ } } } static void register_fencing_topology(xmlXPathObjectPtr xpathObj) { int max = numXpathResults(xpathObj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); handle_topology_change(match, TRUE); } } /* Fencing */ static void fencing_topology_init() { xmlXPathObjectPtr xpathObj = NULL; const char *xpath = "//" XML_TAG_FENCING_LEVEL; crm_trace("Full topology refresh"); free_topology_list(); init_topology_list(); /* Grab everything */ xpathObj = xpath_search(local_cib, xpath); register_fencing_topology(xpathObj); freeXpathObject(xpathObj); } #define rsc_name(x) x->clone_name?x->clone_name:x->id /*! * \internal * \brief Check whether our uname is in a resource's allowed node list * * \param[in] rsc Resource to check * * \return Pointer to node object if found, NULL otherwise */ static pe_node_t * our_node_allowed_for(pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; if (rsc && stonith_our_uname) { g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if (node && strcmp(node->details->uname, stonith_our_uname) == 0) { break; } node = NULL; } } return node; } /*! * \internal * \brief If a resource or any of its children are STONITH devices, update their * definitions given a cluster working set. 
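fencing_topology_init() above rebuilds the whole topology by running a single XPath query over the cached CIB and visiting every match. A stand-alone libxml2 illustration of that xpath_search()/getXpathResult() pattern (this is plain libxml2, not the Pacemaker wrappers; build with `gcc sketch.c $(xml2-config --cflags --libs)`):

#include <libxml/parser.h>
#include <libxml/xpath.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *cib =
        "<cib><configuration><fencing-topology>"
        "<fencing-level id=\"l1\" target=\"node1\" index=\"1\" devices=\"disk\"/>"
        "<fencing-level id=\"l2\" target=\"node1\" index=\"2\" devices=\"power\"/>"
        "</fencing-topology></configuration></cib>";
    xmlDocPtr doc = xmlReadMemory(cib, strlen(cib), "cib", NULL, 0);
    xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
    xmlXPathObjectPtr obj =
        xmlXPathEvalExpression(BAD_CAST "//fencing-level", ctx);
    int max = (obj->nodesetval == NULL)? 0 : obj->nodesetval->nodeNr;

    for (int lpc = 0; lpc < max; lpc++) {
        xmlNodePtr match = obj->nodesetval->nodeTab[lpc];
        xmlChar *id = xmlGetProp(match, BAD_CAST "id");

        printf("registering level %s\n", id);
        xmlFree(id);
    }
    xmlXPathFreeObject(obj);
    xmlXPathFreeContext(ctx);
    xmlFreeDoc(doc);
    xmlCleanupParser();
    return 0;
}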
* * \param[in] rsc Resource to check * \param[in] data_set Cluster working set with device information */ static void cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set) { pe_node_t *node = NULL; const char *value = NULL; const char *rclass = NULL; pe_node_t *parent = NULL; gboolean remove = TRUE; /* If this is a complex resource, check children rather than this resource itself. * TODO: Mark each installed device and remove if untouched when this process finishes. */ if(rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { cib_device_update(gIter->data, data_set); if(pe_rsc_is_clone(rsc)) { crm_trace("Only processing one copy of the clone %s", rsc->id); break; } } return; } /* We only care about STONITH resources. */ rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if (safe_str_neq(rclass, PCMK_RESOURCE_CLASS_STONITH)) { return; } /* If this STONITH resource is disabled, just remove it. */ if (pe__resource_is_disabled(rsc)) { crm_info("Device %s has been disabled", rsc->id); goto update_done; } /* Check whether our node is allowed for this resource (and its parent if in a group) */ node = our_node_allowed_for(rsc); if (rsc->parent && (rsc->parent->variant == pe_group)) { parent = our_node_allowed_for(rsc->parent); } if(node == NULL) { /* Our node is disallowed, so remove the device */ GHashTableIter iter; crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { crm_trace("Available: %s = %d", node->details->uname, node->weight); } goto update_done; } else if(node->weight < 0 || (parent && parent->weight < 0)) { /* Our node (or its group) is disallowed by score, so remove the device */ char *score = score2char((node->weight < 0) ? node->weight : parent->weight); crm_info("Device %s has been disabled on %s: score=%s", rsc->id, stonith_our_uname, score); free(score); goto update_done; } else { /* Our node is allowed, so update the device information */ int rc; xmlNode *data; GHashTableIter gIter; stonith_key_value_t *params = NULL; const char *name = NULL; const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE); const char *rsc_provides = NULL; crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight); get_rsc_attributes(rsc->parameters, rsc, node, data_set); get_meta_attributes(rsc->meta, rsc, node, data_set); rsc_provides = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROVIDES); g_hash_table_iter_init(&gIter, rsc->parameters); while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) { if (!name || !value) { continue; } params = stonith_key_value_add(params, name, value); crm_trace(" %s=%s", name, value); } remove = FALSE; data = create_device_registration_xml(rsc_name(rsc), st_namespace_any, agent, params, rsc_provides); stonith_key_value_freeall(params, 1, 1); rc = stonith_device_register(data, NULL, TRUE); CRM_ASSERT(rc == pcmk_ok); free_xml(data); } update_done: if(remove && g_hash_table_lookup(device_list, rsc_name(rsc))) { stonith_device_remove(rsc_name(rsc), TRUE); } } /*! 
* \internal * \brief Update all STONITH device definitions based on current CIB */ static void cib_devices_update(void) { GListPtr gIter = NULL; crm_info("Updating devices to version %s.%s.%s", crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN), crm_element_value(local_cib, XML_ATTR_GENERATION), crm_element_value(local_cib, XML_ATTR_NUMUPDATES)); CRM_ASSERT(fenced_data_set != NULL); fenced_data_set->input = local_cib; fenced_data_set->now = crm_time_new(NULL); fenced_data_set->flags |= pe_flag_quick_location; fenced_data_set->localhost = stonith_our_uname; cluster_status(fenced_data_set); pcmk__schedule_actions(fenced_data_set, NULL, NULL); for (gIter = fenced_data_set->resources; gIter != NULL; gIter = gIter->next) { cib_device_update(gIter->data, fenced_data_set); } fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it pe_reset_working_set(fenced_data_set); } static void update_cib_stonith_devices_v2(const char *event, xmlNode * msg) { xmlNode *change = NULL; char *reason = NULL; bool needs_update = FALSE; xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) { const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); const char *shortpath = NULL; if ((op == NULL) || (strcmp(op, "move") == 0) || strstr(xpath, "/"XML_CIB_TAG_STATUS)) { continue; } else if (safe_str_eq(op, "delete") && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) { const char *rsc_id = NULL; char *search = NULL; char *mutable = NULL; if (strstr(xpath, XML_TAG_ATTR_SETS) || strstr(xpath, XML_TAG_META_SETS)) { needs_update = TRUE; reason = strdup("(meta) attribute deleted from resource"); break; } mutable = strdup(xpath); rsc_id = strstr(mutable, "primitive[@id=\'"); if (rsc_id != NULL) { rsc_id += strlen("primitive[@id=\'"); search = strchr(rsc_id, '\''); } if (search != NULL) { *search = 0; stonith_device_remove(rsc_id, TRUE); } else { crm_warn("Ignoring malformed CIB update (resource deletion)"); } free(mutable); } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) || strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) || strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) { shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath); reason = crm_strdup_printf("%s %s", op, shortpath+1); needs_update = TRUE; break; } } if(needs_update) { crm_info("Updating device list from the cib: %s", reason); cib_devices_update(); } else { crm_trace("No updates for device list found in cib"); } free(reason); } static void update_cib_stonith_devices_v1(const char *event, xmlNode * msg) { const char *reason = "none"; gboolean needs_update = FALSE; xmlXPathObjectPtr xpath_obj = NULL; /* process new constraints */ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION); if (numXpathResults(xpath_obj) > 0) { int max = numXpathResults(xpath_obj), lpc = 0; /* Safest and simplest to always recompute */ needs_update = TRUE; reason = "new location constraint"; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpath_obj, lpc); crm_log_xml_trace(match, "new constraint"); } } freeXpathObject(xpath_obj); /* process deletions */ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE); if (numXpathResults(xpath_obj) > 0) { remove_cib_device(xpath_obj); } freeXpathObject(xpath_obj); /* process additions */ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" 
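update_cib_stonith_devices_v2() above recovers the resource ID from the XPath of a deleted CIB element by scanning a mutable copy for primitive[@id='...'] and cutting at the closing quote. The same extraction as a self-contained snippet (pure libc; the function name is mine):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns a newly allocated id string, or NULL if the path has no
 * primitive[@id='...'] component (e.g. a malformed update). */
static char *
rsc_id_from_xpath(const char *xpath)
{
    char *mutable = strdup(xpath);
    char *id = NULL;
    char *start = strstr(mutable, "primitive[@id='");

    if (start != NULL) {
        char *end = NULL;

        start += strlen("primitive[@id='");
        end = strchr(start, '\'');
        if (end != NULL) {
            *end = '\0';
            id = strdup(start);
        }
    }
    free(mutable);
    return id;
}

int
main(void)
{
    char *id = rsc_id_from_xpath(
        "/cib/configuration/resources/primitive[@id='fence-ipmi-node1']");

    printf("%s\n", id ? id : "(not found)");
    free(id);
    return 0;
}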
XML_CIB_TAG_RESOURCE); if (numXpathResults(xpath_obj) > 0) { int max = numXpathResults(xpath_obj), lpc = 0; for (lpc = 0; lpc < max; lpc++) { const char *rsc_id = NULL; const char *standard = NULL; xmlNode *match = getXpathResult(xpath_obj, lpc); rsc_id = crm_element_value(match, XML_ATTR_ID); standard = crm_element_value(match, XML_AGENT_ATTR_CLASS); if (safe_str_neq(standard, PCMK_RESOURCE_CLASS_STONITH)) { continue; } crm_trace("Fencing resource %s was added or modified", rsc_id); reason = "new resource"; needs_update = TRUE; } } freeXpathObject(xpath_obj); if(needs_update) { crm_info("Updating device list from the cib: %s", reason); cib_devices_update(); } } static void update_cib_stonith_devices(const char *event, xmlNode * msg) { int format = 1; xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); CRM_ASSERT(patchset); crm_element_value_int(patchset, "format", &format); switch(format) { case 1: update_cib_stonith_devices_v1(event, msg); break; case 2: update_cib_stonith_devices_v2(event, msg); break; default: crm_warn("Unknown patch format: %d", format); } } /* Needs to hold node name + attribute name + attribute value + 75 */ #define XPATH_MAX 512 /*! * \internal * \brief Check whether a node has a specific attribute name/value * * \param[in] node Name of node to check * \param[in] name Name of an attribute to look for * \param[in] value The value the named attribute needs to be set to in order to be considered a match * * \return TRUE if the locally cached CIB has the specified node attribute */ gboolean node_has_attr(const char *node, const char *name, const char *value) { char xpath[XPATH_MAX]; xmlNode *match; int n; CRM_CHECK(local_cib != NULL, return FALSE); /* Search for the node's attributes in the CIB. While the schema allows * multiple sets of instance attributes, and allows instance attributes to * use id-ref to reference values elsewhere, that is intended for resources, * so we ignore that here. 
*/ n = snprintf(xpath, XPATH_MAX, "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE "[@uname='%s']/" XML_TAG_ATTR_SETS "/" XML_CIB_TAG_NVPAIR "[@name='%s' and @value='%s']", node, name, value); match = get_xpath_object(xpath, local_cib, LOG_NEVER); CRM_CHECK(n < XPATH_MAX, return FALSE); return (match != NULL); } static void update_fencing_topology(const char *event, xmlNode * msg) { int format = 1; const char *xpath; xmlXPathObjectPtr xpathObj = NULL; xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); CRM_ASSERT(patchset); crm_element_value_int(patchset, "format", &format); if(format == 1) { /* Process deletions (only) */ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); remove_fencing_topology(xpathObj); freeXpathObject(xpathObj); /* Process additions and changes */ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); register_fencing_topology(xpathObj); freeXpathObject(xpathObj); } else if(format == 2) { xmlNode *change = NULL; int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; xml_patch_versions(patchset, add, del); for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) { const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); if(op == NULL) { continue; } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) { /* Change to a specific entry */ crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); if(strcmp(op, "move") == 0) { continue; } else if(strcmp(op, "create") == 0) { handle_topology_change(change->children, FALSE); } else if(strcmp(op, "modify") == 0) { xmlNode *match = first_named_child(change, XML_DIFF_RESULT); if(match) { handle_topology_change(match->children, TRUE); } } else if(strcmp(op, "delete") == 0) { /* Nuclear option, all we have is the path and an id... 
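node_has_attr() above builds its XPath with snprintf() into a fixed buffer and uses the return value to detect truncation. Here is that pattern as a stand-alone sketch; note that snprintf() returns the length the string would have had, so a result at or beyond the buffer size means the XPath was cut short, and checking that before using the buffer (as below) is the safer ordering:

#include <stdbool.h>
#include <stdio.h>

#define XPATH_MAX 512

static bool
build_attr_xpath(char *buf, size_t len,
                 const char *node, const char *name, const char *value)
{
    int n = snprintf(buf, len,
                     "//nodes/node[@uname='%s']/instance_attributes"
                     "/nvpair[@name='%s' and @value='%s']",
                     node, name, value);

    return (n > 0) && ((size_t) n < len);
}

int
main(void)
{
    char xpath[XPATH_MAX];

    if (build_attr_xpath(xpath, sizeof(xpath), "node1", "standby", "true")) {
        printf("query: %s\n", xpath);
    } else {
        printf("xpath truncated, refusing to query\n");
    }
    return 0;
}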
not enough to remove a specific entry */ crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) { /* Change to the topology in general */ crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) { /* Changes to the whole config section, possibly including the topology as a whild */ if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) { crm_trace("Nothing for us in %s operation %d.%d.%d for %s.", op, add[0], add[1], add[2], xpath); } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) { crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.", op, add[0], add[1], add[2], xpath); fencing_topology_init(); return; } } else { crm_trace("Nothing for us in %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath); } } } else { crm_warn("Unknown patch format: %d", format); } } static bool have_cib_devices = FALSE; static void update_cib_cache_cb(const char *event, xmlNode * msg) { int rc = pcmk_ok; xmlNode *stonith_enabled_xml = NULL; xmlNode *stonith_watchdog_xml = NULL; const char *stonith_enabled_s = NULL; static gboolean stonith_enabled_saved = TRUE; if(!have_cib_devices) { crm_trace("Skipping updates until we get a full dump"); return; } else if(msg == NULL) { crm_trace("Missing %s update", event); return; } /* Maintain a local copy of the CIB so that we have full access * to device definitions, location constraints, and node attributes */ if (local_cib != NULL) { int rc = pcmk_ok; xmlNode *patchset = NULL; crm_element_value_int(msg, F_CIB_RC, &rc); if (rc != pcmk_ok) { return; } patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT); xml_log_patchset(LOG_TRACE, "Config update", patchset); rc = xml_apply_patchset(local_cib, patchset, TRUE); switch (rc) { case pcmk_ok: case -pcmk_err_old_data: break; case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(local_cib); local_cib = NULL; break; default: crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(local_cib); local_cib = NULL; } } if (local_cib == NULL) { crm_trace("Re-requesting the full cib"); rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call); if(rc != pcmk_ok) { crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc); return; } CRM_ASSERT(local_cib != NULL); stonith_enabled_saved = FALSE; /* Trigger a full refresh below */ } crm_peer_caches_refresh(local_cib); stonith_enabled_xml = get_xpath_object("//nvpair[@name='stonith-enabled']", local_cib, LOG_NEVER); if (stonith_enabled_xml) { stonith_enabled_s = crm_element_value(stonith_enabled_xml, XML_NVPAIR_ATTR_VALUE); } if (stonith_enabled_s == NULL || crm_is_true(stonith_enabled_s)) { long timeout_ms = 0; const char *value = NULL; stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']", local_cib, LOG_NEVER); if (stonith_watchdog_xml) { value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE); } if(value) { timeout_ms = crm_get_msec(value); } if (timeout_ms < 0) { timeout_ms = pcmk__auto_watchdog_timeout(); } if(timeout_ms != stonith_watchdog_timeout_ms) { crm_notice("New watchdog timeout %lds (was %lds)", timeout_ms/1000, 
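The stonith-watchdog-timeout handling above takes a user-supplied interval string, converts it to milliseconds with crm_get_msec(), and treats a negative result as "derive the timeout automatically". A rough, illustrative parser in the same spirit (this is not crm_get_msec(); it only shows the unit-suffix idea):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long
interval_to_ms(const char *value)
{
    char *end = NULL;
    long num = strtol(value, &end, 10);

    if (end == value) {
        return 0;               /* not a number at all */
    }
    if ((strcmp(end, "ms") == 0) || (strcmp(end, "msec") == 0)) {
        return num;
    }
    if ((*end == '\0') || (strcmp(end, "s") == 0) || (strcmp(end, "sec") == 0)) {
        return num * 1000;      /* treat unitless values as seconds */
    }
    return 0;                   /* unknown unit */
}

int
main(void)
{
    printf("%ld %ld %ld\n",
           interval_to_ms("5"), interval_to_ms("5s"), interval_to_ms("1500ms"));
    /* prints: 5000 5000 1500 */
    return 0;
}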
stonith_watchdog_timeout_ms/1000); stonith_watchdog_timeout_ms = timeout_ms; } } else { stonith_watchdog_timeout_ms = 0; } if (stonith_enabled_s && crm_is_true(stonith_enabled_s) == FALSE) { crm_trace("Ignoring cib updates while stonith is disabled"); stonith_enabled_saved = FALSE; return; } else if (stonith_enabled_saved == FALSE) { crm_info("Updating stonith device and topology lists now that stonith is enabled"); stonith_enabled_saved = TRUE; fencing_topology_init(); cib_devices_update(); } else { update_fencing_topology(event, msg); update_cib_stonith_devices(event, msg); } } static void init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { crm_info("Updating device list from the cib: init"); have_cib_devices = TRUE; local_cib = copy_xml(output); crm_peer_caches_refresh(local_cib); fencing_topology_init(); cib_devices_update(); } static void stonith_shutdown(int nsig) { crm_info("Terminating with %d clients", pcmk__ipc_client_count()); stonith_shutdown_flag = TRUE; if (mainloop != NULL && g_main_loop_is_running(mainloop)) { g_main_loop_quit(mainloop); } else { stonith_cleanup(); crm_exit(CRM_EX_OK); } } static void cib_connection_destroy(gpointer user_data) { if (stonith_shutdown_flag) { crm_info("Connection to the CIB manager closed"); return; } else { crm_crit("Lost connection to the CIB manager, shutting down"); } if (cib_api) { cib_api->cmds->signoff(cib_api); } stonith_shutdown(0); } static void stonith_cleanup(void) { if (cib_api) { cib_api->cmds->signoff(cib_api); } if (ipcs) { qb_ipcs_destroy(ipcs); } crm_peer_destroy(); pcmk__client_cleanup(); free_stonith_remote_op_list(); free_topology_list(); free_device_list(); free_metadata_cache(); free(stonith_our_uname); stonith_our_uname = NULL; free_xml(local_cib); local_cib = NULL; } static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "stand-alone", no_argument, 0, 's', NULL, pcmk__option_default }, { "stand-alone-w-cpg", no_argument, 0, 'c', NULL, pcmk__option_default }, { "logfile", required_argument, 0, 'l', NULL, pcmk__option_default }, { "verbose", no_argument, 0, 'V', NULL, pcmk__option_default }, { "version", no_argument, 0, '$', NULL, pcmk__option_default }, { "help", no_argument, 0, '?', NULL, pcmk__option_default }, { 0, 0, 0, 0 } }; static void setup_cib(void) { int rc, retries = 0; - static cib_t *(*cib_new_fn) (void) = NULL; - - if (cib_new_fn == NULL) { - cib_new_fn = find_library_function(&cib_library, CIB_LIBRARY, "cib_new", TRUE); - } - - if (cib_new_fn != NULL) { - cib_api = (*cib_new_fn) (); - } + cib_api = cib_new(); if (cib_api == NULL) { crm_err("No connection to the CIB manager"); return; } do { sleep(retries); rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command); } while (rc == -ENOTCONN && ++retries < 5); if (rc != pcmk_ok) { crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc); } else if (pcmk_ok != cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) { crm_err("Could not set CIB notification callback"); } else { rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local); cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb", init_cib_cache_cb); cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy); crm_info("Watching for stonith topology changes"); } } struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = st_ipc_accept, .connection_created = NULL, .msg_process 
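setup_cib() above (now calling cib_new() directly instead of loading it via dlopen) retries the CIB sign-on a fixed number of times, sleeping a little longer before each attempt. The same bounded back-off loop reduced to a self-contained shape; try_connect() is a stand-in, not a Pacemaker call:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
try_connect(void)
{
    /* Stand-in: a real caller would attempt the IPC/socket connection here
     * and return 0 on success, or a negative errno on failure. */
    return -ENOTCONN;
}

int
main(void)
{
    int rc = 0;
    int retries = 0;

    do {
        sleep(retries);             /* 0s, 1s, 2s, ... back-off */
        rc = try_connect();
    } while ((rc == -ENOTCONN) && (++retries < 5));

    if (rc != 0) {
        fprintf(stderr, "could not connect: %s (%d)\n", strerror(-rc), rc);
        return 1;
    }
    return 0;
}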
= st_ipc_dispatch, .connection_closed = st_ipc_closed, .connection_destroyed = st_ipc_destroy }; /*! * \internal * \brief Callback for peer status changes * * \param[in] type What changed * \param[in] node What peer had the change * \param[in] data Previous value of what changed */ static void st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) { if ((type != crm_status_processes) && !is_set(node->flags, crm_remote_node)) { /* * This is a hack until we can send to a nodeid and/or we fix node name lookups * These messages are ignored in stonith_peer_callback() */ xmlNode *query = create_xml_node(NULL, "stonith_command"); crm_xml_add(query, F_XML_TAGNAME, "stonith_command"); crm_xml_add(query, F_TYPE, T_STONITH_NG); crm_xml_add(query, F_STONITH_OPERATION, "poke"); crm_debug("Broadcasting our uname because of node %u", node->id); send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE); free_xml(query); } } int main(int argc, char **argv) { int flag; int lpc = 0; int argerr = 0; int option_index = 0; crm_cluster_t cluster; const char *actions[] = { "reboot", "off", "on", "list", "monitor", "status" }; crm_ipc_t *old_instance = NULL; crm_log_preinit(NULL, argc, argv); pcmk__set_cli_options(NULL, "[options]", long_options, "daemon for executing fencing devices in a " "Pacemaker cluster"); while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); if (flag == -1) { break; } switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 'l': crm_add_logfile(optarg); break; case 's': stand_alone = TRUE; break; case 'c': stand_alone = FALSE; no_cib_connect = TRUE; break; case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; default: ++argerr; break; } } if (argc - optind == 1 && safe_str_eq("metadata", argv[optind])) { printf("\n"); printf("\n"); printf(" 1.0\n"); printf(" Instance attributes available for all \"stonith\"-class resources" " and used by Pacemaker's fence daemon, formerly known as stonithd\n"); printf(" Instance attributes available for all \"stonith\"-class resources\n"); printf(" \n"); #if 0 // priority is not implemented yet printf(" \n"); printf(" Devices that are not in a topology " "are tried in order of highest to lowest integer priority\n"); printf(" \n"); printf(" \n"); #endif printf(" \n", STONITH_ATTR_HOSTARG); printf (" Advanced use only: An alternate parameter to supply instead of 'port'\n"); printf (" Some devices do not support the standard 'port' parameter or may provide additional ones.\n" "Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.\n" "A value of 'none' can be used to tell the cluster not to supply any additional parameters.\n" " \n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTMAP); printf (" A mapping of host names to ports numbers for devices that do not support host names.\n"); printf (" Eg. 
node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2\n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTLIST); printf (" A list of machines controlled by this device (Optional unless %s=static-list).\n", STONITH_ATTR_HOSTCHECK); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTCHECK); printf (" How to determine which machines are controlled by the device.\n"); printf(" Allowed values: dynamic-list " "(query the device via the 'list' command), static-list " "(check the " STONITH_ATTR_HOSTLIST " attribute), status " "(query the device via the 'status' command), none (assume " "every device can fence every machine)\n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_DELAY_MAX); printf (" Enable a random delay for stonith actions and specify the maximum of random delay.\n"); printf (" This prevents double fencing when using slow devices such as sbd.\n" "Use this to enable a random delay for stonith actions.\n" "The overall delay is derived from this random delay value adding a static delay so that the sum is kept below the maximum delay.\n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_DELAY_BASE); printf (" Enable a base delay for stonith actions and specify base delay value.\n"); printf (" This prevents double fencing when different delays are configured on the nodes.\n" "Use this to enable a static delay for stonith actions.\n" "The overall delay is derived from a random delay value adding this static delay so that the sum is kept below the maximum delay.\n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_ACTION_LIMIT); printf (" The maximum number of actions can be performed in parallel on this device\n"); printf (" Cluster property concurrent-fencing=true needs to be configured first.\n" "Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.\n"); printf(" \n"); printf(" \n"); for (lpc = 0; lpc < DIMOF(actions); lpc++) { printf(" \n", actions[lpc]); printf (" Advanced use only: An alternate command to run instead of '%s'\n", actions[lpc]); printf (" Some devices do not support the standard commands or may provide additional ones.\n" "Use this to specify an alternate, device-specific, command that implements the '%s' action.\n", actions[lpc]); printf(" \n", actions[lpc]); printf(" \n"); printf(" \n", actions[lpc]); printf (" Advanced use only: Specify an alternate timeout to use for %s actions instead of stonith-timeout\n", actions[lpc]); printf (" Some devices need much more/less time to complete than normal.\n" "Use this to specify an alternate, device-specific, timeout for '%s' actions.\n", actions[lpc]); printf(" \n"); printf(" \n"); printf(" \n", actions[lpc]); printf (" Advanced use only: The maximum number of times to retry the '%s' command within the timeout period\n", actions[lpc]); printf(" Some devices do not support multiple connections." " Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining." " Use this option to alter the number of times Pacemaker retries '%s' actions before giving up." 
"\n", actions[lpc]); printf(" \n"); printf(" \n"); } printf(" \n"); printf("\n"); return CRM_EX_OK; } if (optind != argc) { ++argerr; } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_notice("Starting Pacemaker fencer"); old_instance = crm_ipc_new("stonith-ng", 0); if (crm_ipc_connect(old_instance)) { /* IPC end-point already up */ crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_err("pacemaker-fenced is already active, aborting startup"); crm_exit(CRM_EX_OK); } else { /* not up or not authentic, we'll proceed either way */ crm_ipc_destroy(old_instance); old_instance = NULL; } mainloop_add_signal(SIGTERM, stonith_shutdown); crm_peer_init(); fenced_data_set = pe_new_working_set(); CRM_ASSERT(fenced_data_set != NULL); set_bit(fenced_data_set->flags, pe_flag_no_counts); set_bit(fenced_data_set->flags, pe_flag_no_compat); if (stand_alone == FALSE) { if (is_corosync_cluster()) { #if SUPPORT_COROSYNC cluster.destroy = stonith_peer_cs_destroy; cluster.cpg.cpg_deliver_fn = stonith_peer_ais_callback; cluster.cpg.cpg_confchg_fn = pcmk_cpg_membership; #endif } crm_set_status_callback(&st_peer_update_callback); if (crm_cluster_connect(&cluster) == FALSE) { crm_crit("Cannot sign in to the cluster... terminating"); crm_exit(CRM_EX_FATAL); } stonith_our_uname = cluster.uname; stonith_our_uuid = cluster.uuid; if (no_cib_connect == FALSE) { setup_cib(); } } else { stonith_our_uname = strdup("localhost"); } init_device_list(); init_topology_list(); if(stonith_watchdog_timeout_ms > 0) { int rc; xmlNode *xml; stonith_key_value_t *params = NULL; params = stonith_key_value_add(params, STONITH_ATTR_HOSTLIST, stonith_our_uname); xml = create_device_registration_xml("watchdog", st_namespace_internal, STONITH_WATCHDOG_AGENT, params, NULL); stonith_key_value_freeall(params, 1, 1); rc = stonith_device_register(xml, NULL, FALSE); free_xml(xml); if (rc != pcmk_ok) { crm_crit("Cannot register watchdog pseudo fence agent"); crm_exit(CRM_EX_FATAL); } } pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks); /* Create the mainloop and run it... */ mainloop = g_main_loop_new(NULL, FALSE); crm_notice("Pacemaker fencer successfully started and accepting connections"); g_main_loop_run(mainloop); stonith_cleanup(); pe_free_working_set(fenced_data_set); crm_exit(CRM_EX_OK); } diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c index 972b8742ce..5ed4626ee2 100644 --- a/daemons/pacemakerd/pacemakerd.c +++ b/daemons/pacemakerd/pacemakerd.c @@ -1,1466 +1,1463 @@ /* * Copyright 2010-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include "pacemakerd.h" #include #include #include #include #include #include #include #include #include #include #include /* indirectly: CRM_EX_* */ #include /* cib_channel_ro */ #include #include #include #include #include #include /* PCMK__SPECIAL_PID*, ... 
*/ #ifdef SUPPORT_COROSYNC #include #endif #include #include static gboolean pcmk_quorate = FALSE; static gboolean fatal_error = FALSE; static GMainLoop *mainloop = NULL; static bool global_keep_tracking = false; #define PCMK_PROCESS_CHECK_INTERVAL 5 static const char *local_name = NULL; static uint32_t local_nodeid = 0; static crm_trigger_t *shutdown_trigger = NULL; static const char *pid_file = PCMK_RUN_DIR "/pacemaker.pid"; typedef struct pcmk_child_s { pid_t pid; long flag; int start_seq; int respawn_count; gboolean respawn; const char *name; const char *uid; const char *command; const char *endpoint; /* IPC server name */ gboolean active_before_startup; } pcmk_child_t; /* Index into the array below */ #define PCMK_CHILD_CONTROLD 3 static pcmk_child_t pcmk_children[] = { { 0, crm_proc_none, 0, 0, FALSE, "none", NULL, NULL }, { 0, crm_proc_execd, 3, 0, TRUE, "pacemaker-execd", NULL, CRM_DAEMON_DIR "/pacemaker-execd", CRM_SYSTEM_LRMD }, { 0, crm_proc_based, 1, 0, TRUE, "pacemaker-based", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-based", PCMK__SERVER_BASED_RO }, { 0, crm_proc_controld, 6, 0, TRUE, "pacemaker-controld", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-controld", CRM_SYSTEM_CRMD }, { 0, crm_proc_attrd, 4, 0, TRUE, "pacemaker-attrd", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-attrd", T_ATTRD }, { 0, crm_proc_schedulerd, 5, 0, TRUE, "pacemaker-schedulerd", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-schedulerd", CRM_SYSTEM_PENGINE }, { 0, crm_proc_fenced, 2, 0, TRUE, "pacemaker-fenced", NULL, CRM_DAEMON_DIR "/pacemaker-fenced", "stonith-ng" }, }; static gboolean check_active_before_startup_processes(gpointer user_data); static int child_liveness(pcmk_child_t *child); static gboolean start_child(pcmk_child_t * child); static gboolean update_node_processes(uint32_t id, const char *uname, uint32_t procs); void update_process_clients(pcmk__client_t *client); static uint32_t get_process_list(void) { int lpc = 0; uint32_t procs = crm_get_cluster_proc(); for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) { if (pcmk_children[lpc].pid != 0) { procs |= pcmk_children[lpc].flag; } } return procs; } static void pcmk_process_exit(pcmk_child_t * child) { child->pid = 0; child->active_before_startup = FALSE; /* Broadcast the fact that one of our processes died ASAP * * Try to get some logging of the cause out first though * because we're probably about to get fenced * * Potentially do this only if respawn_count > N * to allow for local recovery */ update_node_processes(local_nodeid, NULL, get_process_list()); child->respawn_count += 1; if (child->respawn_count > MAX_RESPAWN) { crm_err("Child respawn count exceeded by %s", child->name); child->respawn = FALSE; } if (shutdown_trigger) { /* resume step-wise shutdown (returned TRUE yields no parallelizing) */ mainloop_set_trigger(shutdown_trigger); /* intended to speed up propagating expected lay-off of the daemons? 
*/ update_node_processes(local_nodeid, NULL, get_process_list()); } else if (!child->respawn) { /* nothing to do */ } else if (crm_is_true(getenv("PCMK_fail_fast"))) { crm_err("Rebooting system because of %s", child->name); pcmk_panic(__FUNCTION__); } else if (child_liveness(child) == pcmk_rc_ok) { crm_warn("One-off suppressing strict respawning of a child process %s," " appears alright per %s IPC end-point", child->name, child->endpoint); /* need to monitor how it evolves, and start new process if badly */ child->active_before_startup = TRUE; if (!global_keep_tracking) { global_keep_tracking = true; g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL, check_active_before_startup_processes, NULL); } } else { crm_notice("Respawning failed child process: %s", child->name); start_child(child); } } static void pcmk_exit_with_cluster(int exitcode) { #ifdef SUPPORT_COROSYNC corosync_cfg_handle_t cfg_handle; cs_error_t err; if (exitcode == CRM_EX_FATAL) { crm_info("Asking Corosync to shut down"); err = corosync_cfg_initialize(&cfg_handle, NULL); if (err != CS_OK) { crm_warn("Unable to open handle to corosync to close it down. err=%d", err); } err = corosync_cfg_try_shutdown(cfg_handle, COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE); if (err != CS_OK) { crm_warn("Corosync shutdown failed. err=%d", err); } corosync_cfg_finalize(cfg_handle); } #endif crm_exit(exitcode); } static void pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { pcmk_child_t *child = mainloop_child_userdata(p); const char *name = mainloop_child_name(p); if (signo) { do_crm_log(((signo == SIGKILL)? LOG_WARNING : LOG_ERR), "%s[%d] terminated with signal %d (core=%d)", name, pid, signo, core); } else { switch(exitcode) { case CRM_EX_OK: crm_info("%s[%d] exited with status %d (%s)", name, pid, exitcode, crm_exit_str(exitcode)); break; case CRM_EX_FATAL: crm_warn("Shutting cluster down because %s[%d] had fatal failure", name, pid); child->respawn = FALSE; fatal_error = TRUE; pcmk_shutdown(SIGTERM); break; case CRM_EX_PANIC: - do_crm_log_always(LOG_EMERG, - "%s[%d] instructed the machine to reset", - name, pid); + crm_emerg("%s[%d] instructed the machine to reset", name, pid); child->respawn = FALSE; fatal_error = TRUE; pcmk_panic(__FUNCTION__); pcmk_shutdown(SIGTERM); break; default: crm_err("%s[%d] exited with status %d (%s)", name, pid, exitcode, crm_exit_str(exitcode)); break; } } pcmk_process_exit(child); } static gboolean stop_child(pcmk_child_t * child, int signal) { if (signal == 0) { signal = SIGTERM; } /* why to skip PID of 1? 
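The signo/exitcode branches in pcmk_child_exit() above are driven by the status that the mainloop's child-exit plumbing ultimately obtains from waitpid(). The bare POSIX version of that "died by signal vs. exited with a code" classification looks like this:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    pid_t pid = fork();
    int status = 0;

    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid == 0) {             /* child: exit with a distinctive status */
        _exit(42);
    }
    waitpid(pid, &status, 0);

    if (WIFSIGNALED(status)) {
        printf("child %d terminated with signal %d\n",
               (int) pid, WTERMSIG(status));
    } else if (WIFEXITED(status)) {
        printf("child %d exited with status %d\n",
               (int) pid, WEXITSTATUS(status));
    }
    return 0;
}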
- FreeBSD ~ how untrackable process behind IPC is masqueraded as - elsewhere: how "init" task is designated; in particular, in systemd arrangement of socket-based activation, this is pretty real */ if (child->command == NULL || child->pid == PCMK__SPECIAL_PID) { crm_debug("Nothing to do for child \"%s\" (process %lld)", child->name, (long long) PCMK__SPECIAL_PID_AS_0(child->pid)); return TRUE; } if (child->pid <= 0) { crm_trace("Client %s not running", child->name); return TRUE; } errno = 0; if (kill(child->pid, signal) == 0) { crm_notice("Stopping %s "CRM_XS" sent signal %d to process %lld", child->name, signal, (long long) child->pid); } else { crm_err("Could not stop %s (process %lld) with signal %d: %s", child->name, (long long) child->pid, signal, strerror(errno)); } return TRUE; } static char *opts_default[] = { NULL, NULL }; static char *opts_vgrind[] = { NULL, NULL, NULL, NULL, NULL }; /* TODO once libqb is taught to juggle with IPC end-points carried over as bare file descriptor (https://github.com/ClusterLabs/libqb/issues/325) it shall hand over these descriptors here if/once they are successfully pre-opened in (presumably) child_liveness(), to avoid any remaining room for races */ static gboolean start_child(pcmk_child_t * child) { uid_t uid = 0; gid_t gid = 0; gboolean use_valgrind = FALSE; gboolean use_callgrind = FALSE; - const char *devnull = "/dev/null"; const char *env_valgrind = getenv("PCMK_valgrind_enabled"); const char *env_callgrind = getenv("PCMK_callgrind_enabled"); child->active_before_startup = FALSE; if (child->command == NULL) { crm_info("Nothing to do for child \"%s\"", child->name); return TRUE; } if (env_callgrind != NULL && crm_is_true(env_callgrind)) { use_callgrind = TRUE; use_valgrind = TRUE; } else if (env_callgrind != NULL && strstr(env_callgrind, child->name)) { use_callgrind = TRUE; use_valgrind = TRUE; } else if (env_valgrind != NULL && crm_is_true(env_valgrind)) { use_valgrind = TRUE; } else if (env_valgrind != NULL && strstr(env_valgrind, child->name)) { use_valgrind = TRUE; } if (use_valgrind && strlen(VALGRIND_BIN) == 0) { crm_warn("Cannot enable valgrind for %s:" " The location of the valgrind binary is unknown", child->name); use_valgrind = FALSE; } if (child->uid) { if (crm_user_lookup(child->uid, &uid, &gid) < 0) { crm_err("Invalid user (%s) for %s: not found", child->uid, child->name); return FALSE; } crm_info("Using uid=%u and group=%u for process %s", uid, gid, child->name); } child->pid = fork(); CRM_ASSERT(child->pid != -1); if (child->pid > 0) { /* parent */ mainloop_child_add(child->pid, 0, child->name, child, pcmk_child_exit); crm_info("Forked child %lld for process %s%s", (long long) child->pid, child->name, use_valgrind ? 
" (valgrind enabled: " VALGRIND_BIN ")" : ""); update_node_processes(local_nodeid, NULL, get_process_list()); return TRUE; } else { /* Start a new session */ (void)setsid(); /* Setup the two alternate arg arrays */ opts_vgrind[0] = strdup(VALGRIND_BIN); if (use_callgrind) { opts_vgrind[1] = strdup("--tool=callgrind"); opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p"); opts_vgrind[3] = strdup(child->command); opts_vgrind[4] = NULL; } else { opts_vgrind[1] = strdup(child->command); opts_vgrind[2] = NULL; opts_vgrind[3] = NULL; opts_vgrind[4] = NULL; } opts_default[0] = strdup(child->command); if(gid) { // Whether we need root group access to talk to cluster layer bool need_root_group = TRUE; if (is_corosync_cluster()) { /* Corosync clusters can drop root group access, because we set * uidgid.gid.${gid}=1 via CMAP, which allows these processes to * connect to corosync. */ need_root_group = FALSE; } // Drop root group access if not needed if (!need_root_group && (setgid(gid) < 0)) { crm_perror(LOG_ERR, "Could not set group to %d", gid); } /* Initialize supplementary groups to only those always granted to * the user, plus haclient (so we can access IPC). */ if (initgroups(child->uid, gid) < 0) { crm_err("Cannot initialize groups for %s: %s (%d)", child->uid, pcmk_strerror(errno), errno); } } if (uid && setuid(uid) < 0) { crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid); } pcmk__close_fds_in_child(true); - (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ - (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ - (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ + pcmk__open_devnull(O_RDONLY); // stdin (fd 0) + pcmk__open_devnull(O_WRONLY); // stdout (fd 1) + pcmk__open_devnull(O_WRONLY); // stderr (fd 2) if (use_valgrind) { (void)execvp(VALGRIND_BIN, opts_vgrind); } else { (void)execvp(child->command, opts_default); } crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command); crm_exit(CRM_EX_FATAL); } return TRUE; /* never reached */ } static gboolean escalate_shutdown(gpointer data) { pcmk_child_t *child = data; if (child->pid == PCMK__SPECIAL_PID) { pcmk_process_exit(child); } else if (child->pid != 0) { /* Use SIGSEGV instead of SIGKILL to create a core so we can see what it was up to */ crm_err("Child %s not terminating in a timely manner, forcing", child->name); stop_child(child, SIGSEGV); } return FALSE; } #define SHUTDOWN_ESCALATION_PERIOD 180000 /* 3m */ static gboolean pcmk_shutdown_worker(gpointer user_data) { static int phase = 0; static time_t next_log = 0; static int max = SIZEOF(pcmk_children); int lpc = 0; if (phase == 0) { crm_notice("Shutting down Pacemaker"); phase = max; } for (; phase > 0; phase--) { /* Don't stop anything with start_seq < 1 */ for (lpc = max - 1; lpc >= 0; lpc--) { pcmk_child_t *child = &(pcmk_children[lpc]); if (phase != child->start_seq) { continue; } if (child->pid != 0) { time_t now = time(NULL); if (child->respawn) { if (child->pid == PCMK__SPECIAL_PID) { crm_warn("The process behind %s IPC cannot be" " terminated, so either wait the graceful" " period of %ld s for its native termination" " if it vitally depends on some other daemons" " going down in a controlled way already," " or locate and kill the correct %s process" " on your own; set PCMK_fail_fast=1 to avoid" " this altogether next time around", child->name, (long) SHUTDOWN_ESCALATION_PERIOD, child->command); } next_log = now + 30; child->respawn = FALSE; stop_child(child, SIGTERM); if (phase < pcmk_children[PCMK_CHILD_CONTROLD].start_seq) { 
g_timeout_add(SHUTDOWN_ESCALATION_PERIOD, escalate_shutdown, child); } } else if (now >= next_log) { next_log = now + 30; crm_notice("Still waiting for %s to terminate " CRM_XS " pid=%lld seq=%d", child->name, (long long) child->pid, child->start_seq); } return TRUE; } /* cleanup */ crm_debug("%s confirmed stopped", child->name); child->pid = 0; } } /* send_cluster_id(); */ crm_notice("Shutdown complete"); { const char *delay = pcmk__env_option("shutdown_delay"); if(delay) { sync(); sleep(crm_get_msec(delay) / 1000); } } g_main_loop_quit(mainloop); if (fatal_error) { crm_notice("Shutting down and staying down after fatal error"); pcmk_exit_with_cluster(CRM_EX_FATAL); } return TRUE; } static void pcmk_ignore(int nsig) { crm_info("Ignoring signal %s (%d)", strsignal(nsig), nsig); } static void pcmk_sigquit(int nsig) { pcmk_panic(__FUNCTION__); } void pcmk_shutdown(int nsig) { if (shutdown_trigger == NULL) { shutdown_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, pcmk_shutdown_worker, NULL); } mainloop_set_trigger(shutdown_trigger); } static int32_t pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { return -EIO; } return 0; } /* Exit code means? */ static int32_t pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; const char *task = NULL; pcmk__client_t *c = pcmk__find_client(qbc); xmlNode *msg = pcmk__client_data2xml(c, data, &id, &flags); pcmk__ipc_send_ack(c, id, flags, "ack"); if (msg == NULL) { return 0; } task = crm_element_value(msg, F_CRM_TASK); if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) { /* Time to quit */ crm_notice("Shutting down in response to ticket %s (%s)", crm_element_value(msg, F_CRM_REFERENCE), crm_element_value(msg, F_CRM_ORIGIN)); pcmk_shutdown(15); } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) { /* Send to everyone */ struct iovec *iov; int id = 0; const char *name = NULL; crm_element_value_int(msg, XML_ATTR_ID, &id); name = crm_element_value(msg, XML_ATTR_UNAME); crm_notice("Instructing peers to remove references to node %s/%u", name, id); iov = calloc(1, sizeof(struct iovec)); iov->iov_base = dump_xml_unformatted(msg); iov->iov_len = 1 + strlen(iov->iov_base); send_cpg_iov(iov); } else { update_process_clients(c); } free_xml(msg); return 0; } /* Error code means? */ static int32_t pcmk_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); pcmk__free_client(client); return 0; } static void pcmk_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pcmk_ipc_closed(c); } struct qb_ipcs_service_handlers mcp_ipc_callbacks = { .connection_accept = pcmk_ipc_accept, .connection_created = NULL, .msg_process = pcmk_ipc_dispatch, .connection_closed = pcmk_ipc_closed, .connection_destroyed = pcmk_ipc_destroy }; static void send_xml_to_client(gpointer key, gpointer value, gpointer user_data) { pcmk__ipc_send_xml((pcmk__client_t *) value, 0, (xmlNode *) user_data, crm_ipc_server_event); } /*! 
* \internal * \brief Send an XML message with process list of all known peers to client(s) * * \param[in] client Send message to this client, or all clients if NULL */ void update_process_clients(pcmk__client_t *client) { GHashTableIter iter; crm_node_t *node = NULL; xmlNode *update = create_xml_node(NULL, "nodes"); if (is_corosync_cluster()) { crm_xml_add_int(update, "quorate", pcmk_quorate); } g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { xmlNode *xml = create_xml_node(update, "node"); crm_xml_add_int(xml, "id", node->id); crm_xml_add(xml, "uname", node->uname); crm_xml_add(xml, "state", node->state); crm_xml_add_int(xml, "processes", node->processes); } if(client) { crm_trace("Sending process list to client %s", client->id); send_xml_to_client(NULL, client, update); } else { crm_trace("Sending process list to %d clients", pcmk__ipc_client_count()); pcmk__foreach_ipc_client(send_xml_to_client, update); } free_xml(update); } /*! * \internal * \brief Send a CPG message with local node's process list to all peers */ static void update_process_peers(void) { /* Do nothing for corosync-2 based clusters */ struct iovec *iov = calloc(1, sizeof(struct iovec)); CRM_ASSERT(iov); if (local_name) { iov->iov_base = crm_strdup_printf("", local_name, get_process_list()); } else { iov->iov_base = crm_strdup_printf("", get_process_list()); } iov->iov_len = strlen(iov->iov_base) + 1; crm_trace("Sending %s", (char*) iov->iov_base); send_cpg_iov(iov); } /*! * \internal * \brief Update a node's process list, notifying clients and peers if needed * * \param[in] id Node ID of affected node * \param[in] uname Uname of affected node * \param[in] procs Affected node's process list mask * * \return TRUE if the process list changed, FALSE otherwise */ static gboolean update_node_processes(uint32_t id, const char *uname, uint32_t procs) { gboolean changed = FALSE; crm_node_t *node = crm_get_peer(id, uname); if (procs != 0) { if (procs != node->processes) { crm_debug("Node %s now has process list: %.32x (was %.32x)", node->uname, procs, node->processes); node->processes = procs; changed = TRUE; /* If local node's processes have changed, notify clients/peers */ if (id == local_nodeid) { update_process_clients(NULL); update_process_peers(); } } else { crm_trace("Node %s still has process list: %.32x", node->uname, procs); } } return changed; } static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\tThis text", pcmk__option_default }, { "version", no_argument, NULL, '$', "\tVersion information", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\tIncrease debug output", pcmk__option_default }, { "shutdown", no_argument, NULL, 'S', "\tInstruct Pacemaker to shutdown on this machine", pcmk__option_default }, { "features", no_argument, NULL, 'F', "\tDisplay full version and list of features Pacemaker was built with", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:", pcmk__option_default }, { "foreground", no_argument, NULL, 'f', "\t(Ignored) Pacemaker always runs in the foreground", pcmk__option_default }, { "pid-file", required_argument, NULL, 'p', "\t(Ignored) Daemon pid file location", pcmk__option_default }, { "standby", no_argument, NULL, 's', "\tStart node in standby state", pcmk__option_default }, { 0, 0, 0, 0 } }; static void mcp_chown(const char *path, uid_t uid, gid_t gid) { int rc = chown(path, uid, 
gid); if (rc < 0) { crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s", path, CRM_DAEMON_USER, gid, pcmk_strerror(errno)); } } /*! * \internal * \brief Check the liveness of the child based on IPC name and PID if tracked * * \param[inout] child Child tracked data * * \return Standard Pacemaker return code * * \note Return codes of particular interest include pcmk_rc_ipc_unresponsive * indicating that no trace of IPC liveness was detected, * pcmk_rc_ipc_unauthorized indicating that the IPC endpoint is blocked by * an unauthorized process, and pcmk_rc_ipc_pid_only indicating that * the child is up by PID but not IPC end-point (possibly starting). * \note This function doesn't modify any of \p child members but \c pid, * and is not actively toying with processes as such but invoking * \c stop_child in one particular case (there's for some reason * a different authentic holder of the IPC end-point). */ static int child_liveness(pcmk_child_t *child) { uid_t cl_uid = 0; gid_t cl_gid = 0; const uid_t root_uid = 0; const gid_t root_gid = 0; const uid_t *ref_uid; const gid_t *ref_gid; int rc = pcmk_rc_ipc_unresponsive; pid_t ipc_pid = 0; if (child->endpoint == NULL && (child->pid <= 0 || child->pid == PCMK__SPECIAL_PID)) { crm_err("Cannot track child %s for missing both API end-point and PID", child->name); rc = EINVAL; // Misuse of function when child is not trackable } else if (child->endpoint != NULL) { int legacy_rc = pcmk_ok; if (child->uid == NULL) { ref_uid = &root_uid; ref_gid = &root_gid; } else { ref_uid = &cl_uid; ref_gid = &cl_gid; legacy_rc = pcmk_daemon_user(&cl_uid, &cl_gid); } if (legacy_rc < 0) { rc = pcmk_legacy2rc(legacy_rc); crm_err("Could not find user and group IDs for user %s: %s " CRM_XS " rc=%d", CRM_DAEMON_USER, pcmk_rc_str(rc), rc); } else { rc = pcmk__ipc_is_authentic_process_active(child->endpoint, *ref_uid, *ref_gid, &ipc_pid); if ((rc == pcmk_rc_ok) || (rc == pcmk_rc_ipc_unresponsive)) { if (child->pid <= 0) { /* If rc is pcmk_rc_ok, ipc_pid is nonzero and this * initializes a new child. If rc is * pcmk_rc_ipc_unresponsive, ipc_pid is zero, and we will * investigate further. */ child->pid = ipc_pid; } else if ((ipc_pid != 0) && (child->pid != ipc_pid)) { /* An unexpected (but authorized) process is responding to * IPC. Investigate further. */ rc = pcmk_rc_ipc_unresponsive; } } } } if (rc == pcmk_rc_ipc_unresponsive) { /* If we get here, a child without IPC is being tracked, no IPC liveness * has been detected, or IPC liveness has been detected with an * unexpected (but authorized) process. This is safe on FreeBSD since * the only change possible from a proper child's PID into "special" PID * of 1 behind more loosely related process. */ int ret = pcmk__pid_active(child->pid, child->name); if (ipc_pid && ((ret != pcmk_rc_ok) || ipc_pid == PCMK__SPECIAL_PID || (pcmk__pid_active(ipc_pid, child->name) == pcmk_rc_ok))) { /* An unexpected (but authorized) process was detected at the IPC * endpoint, and either it is active, or the child we're tracking is * not. */ if (ret == pcmk_rc_ok) { /* The child we're tracking is active. Kill it, and adopt the * detected process. This assumes that our children don't fork * (thus getting a different PID owning the IPC), but rather the * tracking got out of sync because of some means external to * Pacemaker, and adopting the detected process is better than * killing it and possibly having to spawn a new child. */ /* not possessing IPC, afterall (what about corosync CPG?) 
*/ stop_child(child, SIGKILL); } rc = pcmk_rc_ok; child->pid = ipc_pid; } else if (ret == pcmk_rc_ok) { // Our tracked child's PID was found active, but not its IPC rc = pcmk_rc_ipc_pid_only; } else if ((child->pid == 0) && (ret == EINVAL)) { // FreeBSD can return EINVAL rc = pcmk_rc_ipc_unresponsive; } else { switch (ret) { case EACCES: rc = pcmk_rc_ipc_unauthorized; break; case ESRCH: rc = pcmk_rc_ipc_unresponsive; break; default: rc = ret; break; } } } return rc; } static gboolean check_active_before_startup_processes(gpointer user_data) { int start_seq = 1, lpc = 0; static int max = SIZEOF(pcmk_children); gboolean keep_tracking = FALSE; for (start_seq = 1; start_seq < max; start_seq++) { for (lpc = 0; lpc < max; lpc++) { if (pcmk_children[lpc].active_before_startup == FALSE) { /* we are already tracking it as a child process. */ continue; } else if (start_seq != pcmk_children[lpc].start_seq) { continue; } else { int rc = child_liveness(&pcmk_children[lpc]); switch (rc) { case pcmk_rc_ok: break; case pcmk_rc_ipc_unresponsive: case pcmk_rc_ipc_pid_only: // This case: it was previously OK if (pcmk_children[lpc].respawn == TRUE) { crm_err("%s[%lld] terminated%s", pcmk_children[lpc].name, (long long) PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid), (rc == pcmk_rc_ipc_pid_only)? " as IPC server" : ""); } else { /* orderly shutdown */ crm_notice("%s[%lld] terminated%s", pcmk_children[lpc].name, (long long) PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid), (rc == pcmk_rc_ipc_pid_only)? " as IPC server" : ""); } pcmk_process_exit(&(pcmk_children[lpc])); continue; default: crm_exit(CRM_EX_FATAL); break; /* static analysis/noreturn */ } } /* at least one of the processes found at startup * is still going, so keep this recurring timer around */ keep_tracking = TRUE; } } global_keep_tracking = keep_tracking; return keep_tracking; } /*! * \internal * \brief Initial one-off check of the pre-existing "child" processes * * With "child" process, we mean the subdaemon that defines an API end-point * (all of them do as of the comment) -- the possible complement is skipped * as it is deemed it has no such shared resources to cause conflicts about, * hence it can presumably be started anew without hesitation. * If that won't hold true in the future, the concept of a shared resource * will have to be generalized beyond the API end-point. * * For boundary cases that the "child" is still starting (IPC end-point is yet * to be witnessed), or more rarely (practically FreeBSD only), when there's * a pre-existing "untrackable" authentic process, we give the situation some * time to possibly unfold in the right direction, meaning that said socket * will appear or the unattainable process will disappear per the observable * IPC, respectively. * * \return Standard Pacemaker return code * * \note Since this gets run at the very start, \c respawn_count fields * for particular children get temporarily overloaded with "rounds * of waiting" tracking, restored once we are about to finish with * success (i.e. returning value >=0) and will remain unrestored * otherwise. One way to suppress liveness detection logic for * particular child is to set the said value to a negative number. 
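 *
 * As a hedged illustration only (not part of the original code), suppressing
 * this detection for one hypothetical pcmk_children[] entry would amount to
 * seeding its respawn_count with a negative value beforehand:
 * \code
 *     // sketch only; assumes i is a valid index into pcmk_children
 *     pcmk_children[i].respawn_count = -1;    // negative: skip detection
 *     find_and_track_existing_processes();    // the entry above is ignored
 * \endcode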
*/ #define WAIT_TRIES 4 /* together with interleaved sleeps, worst case ~ 1s */ static int find_and_track_existing_processes(void) { bool tracking = false; bool wait_in_progress; int rc; size_t i, rounds; for (rounds = 1; rounds <= WAIT_TRIES; rounds++) { wait_in_progress = false; for (i = 0; i < SIZEOF(pcmk_children); i++) { if ((pcmk_children[i].endpoint == NULL) || (pcmk_children[i].respawn_count < 0)) { continue; } rc = child_liveness(&pcmk_children[i]); if (rc == pcmk_rc_ipc_unresponsive) { /* As a speculation, don't give up if there are more rounds to * come for other reasons, but don't artificially wait just * because of this, since we would preferably start ASAP. */ continue; } pcmk_children[i].respawn_count = rounds; switch (rc) { case pcmk_rc_ok: if (pcmk_children[i].pid == PCMK__SPECIAL_PID) { if (crm_is_true(getenv("PCMK_fail_fast"))) { crm_crit("Cannot reliably track pre-existing" " authentic process behind %s IPC on this" " platform and PCMK_fail_fast requested", pcmk_children[i].endpoint); return EOPNOTSUPP; } else if (pcmk_children[i].respawn_count == WAIT_TRIES) { crm_notice("Assuming pre-existing authentic, though" " on this platform untrackable, process" " behind %s IPC is stable (was in %d" " previous samples) so rather than" " bailing out (PCMK_fail_fast not" " requested), we just switch to a less" " optimal IPC liveness monitoring" " (not very suitable for heavy load)", pcmk_children[i].name, WAIT_TRIES - 1); crm_warn("The process behind %s IPC cannot be" " terminated, so the overall shutdown" " will get delayed implicitly (%ld s)," " which serves as a graceful period for" " its native termination if it vitally" " depends on some other daemons going" " down in a controlled way already", pcmk_children[i].name, (long) SHUTDOWN_ESCALATION_PERIOD); } else { wait_in_progress = true; crm_warn("Cannot reliably track pre-existing" " authentic process behind %s IPC on this" " platform, can still disappear in %d" " attempt(s)", pcmk_children[i].endpoint, WAIT_TRIES - pcmk_children[i].respawn_count); continue; } } crm_notice("Tracking existing %s process (pid=%lld)", pcmk_children[i].name, (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid)); pcmk_children[i].respawn_count = -1; /* 0~keep watching */ pcmk_children[i].active_before_startup = TRUE; tracking = true; break; case pcmk_rc_ipc_pid_only: if (pcmk_children[i].respawn_count == WAIT_TRIES) { crm_crit("%s IPC end-point for existing authentic" " process %lld did not (re)appear", pcmk_children[i].endpoint, (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid)); return rc; } wait_in_progress = true; crm_warn("Cannot find %s IPC end-point for existing" " authentic process %lld, can still (re)appear" " in %d attempts (?)", pcmk_children[i].endpoint, (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid), WAIT_TRIES - pcmk_children[i].respawn_count); continue; default: crm_crit("Checked liveness of %s: %s " CRM_XS " rc=%d", pcmk_children[i].name, pcmk_rc_str(rc), rc); return rc; } } if (!wait_in_progress) { break; } (void) poll(NULL, 0, 250); /* a bit for changes to possibly happen */ } for (i = 0; i < SIZEOF(pcmk_children); i++) { pcmk_children[i].respawn_count = 0; /* restore pristine state */ } if (tracking) { g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL, check_active_before_startup_processes, NULL); } return pcmk_rc_ok; } static void init_children_processes(void) { int start_seq = 1, lpc = 0; static int max = SIZEOF(pcmk_children); /* start any children that have not been detected */ for (start_seq = 1; 
start_seq < max; start_seq++) { /* don't start anything with start_seq < 1 */ for (lpc = 0; lpc < max; lpc++) { if (pcmk_children[lpc].pid != 0) { /* we are already tracking it */ continue; } if (start_seq == pcmk_children[lpc].start_seq) { start_child(&(pcmk_children[lpc])); } } } /* From this point on, any daemons being started will be due to * respawning rather than node start. * * This may be useful for the daemons to know */ setenv("PCMK_respawned", "true", 1); } static void mcp_cpg_destroy(gpointer user_data) { crm_crit("Lost connection to cluster layer, shutting down"); crm_exit(CRM_EX_DISCONNECT); } /*! * \internal * \brief Process a CPG message (process list or manual peer cache removal) * * \param[in] handle CPG connection (ignored) * \param[in] groupName CPG group name (ignored) * \param[in] nodeid ID of affected node * \param[in] pid Process ID (ignored) * \param[in] msg CPG XML message * \param[in] msg_len Length of msg in bytes (ignored) */ static void mcp_cpg_deliver(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { xmlNode *xml = string2xml(msg); const char *task = crm_element_value(xml, F_CRM_TASK); crm_trace("Received CPG message (%s): %.200s", (task? task : "process list"), (char*)msg); if (task == NULL) { if (nodeid == local_nodeid) { crm_debug("Ignoring message with local node's process list"); } else { uint32_t procs = 0; const char *uname = crm_element_value(xml, "uname"); crm_element_value_int(xml, "proclist", (int *)&procs); if (update_node_processes(nodeid, uname, procs)) { update_process_clients(NULL); } } } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) { int id = 0; const char *name = NULL; crm_element_value_int(xml, XML_ATTR_ID, &id); name = crm_element_value(xml, XML_ATTR_UNAME); reap_crm_member(id, name); } if (xml != NULL) { free_xml(xml); } } static void mcp_cpg_membership(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries) { /* Update peer cache if needed */ pcmk_cpg_membership(handle, groupName, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries); /* Always broadcast our own presence after any membership change */ update_process_peers(); } static gboolean mcp_quorum_callback(unsigned long long seq, gboolean quorate) { pcmk_quorate = quorate; return TRUE; } static void mcp_quorum_destroy(gpointer user_data) { crm_info("connection lost"); } int main(int argc, char **argv) { int rc; int flag; int argerr = 0; int option_index = 0; gboolean shutdown = FALSE; uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; struct rlimit cores; crm_ipc_t *old_instance = NULL; qb_ipcs_service_t *ipcs = NULL; static crm_cluster_t cluster; crm_log_preinit(NULL, argc, argv); pcmk__set_cli_options(NULL, "[options]", long_options, "primary Pacemaker daemon that launches and " "monitors all subsidiary Pacemaker daemons"); mainloop_add_signal(SIGHUP, pcmk_ignore); mainloop_add_signal(SIGQUIT, pcmk_sigquit); while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 'f': /* Legacy */ break; case 'p': pid_file = optarg; break; case 's': pcmk__set_env_option("node_start_state", "standby"); break; case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; case 'S': shutdown = 
TRUE; break; case 'F': printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURE_SET, CRM_FEATURES); crm_exit(CRM_EX_OK); default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } setenv("LC_ALL", "C", 1); pcmk__set_env_option("mcp", "true"); crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_debug("Checking for existing Pacemaker instance"); old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0); (void) crm_ipc_connect(old_instance); if (shutdown) { crm_debug("Shutting down existing Pacemaker instance by request"); while (crm_ipc_connected(old_instance)) { xmlNode *cmd = create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL); crm_debug("."); crm_ipc_send(old_instance, cmd, 0, 0, NULL); free_xml(cmd); sleep(2); } crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_exit(CRM_EX_OK); } else if (crm_ipc_connected(old_instance)) { crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_err("Aborting start-up because active Pacemaker instance found"); crm_exit(CRM_EX_FATAL); } crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); if (mcp_read_config() == FALSE) { crm_notice("Could not obtain corosync config data, exiting"); crm_exit(CRM_EX_UNAVAILABLE); } // OCF shell functions and cluster-glue need facility under different name { const char *facility = pcmk__env_option("logfacility"); if (facility && safe_str_neq(facility, "none")) { setenv("HA_LOGFACILITY", facility, 1); } } crm_notice("Starting Pacemaker %s "CRM_XS" build=%s features:%s", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); mainloop = g_main_loop_new(NULL, FALSE); rc = getrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_perror(LOG_ERR, "Cannot determine current maximum core size."); } else { if (cores.rlim_max == 0 && geteuid() == 0) { cores.rlim_max = RLIM_INFINITY; } else { crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max); } cores.rlim_cur = cores.rlim_max; rc = setrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_perror(LOG_ERR, "Core file generation will remain disabled." 
" Core files are an important diagnostic tool, so" " please consider enabling them by default."); } } if (pcmk_daemon_user(&pcmk_uid, &pcmk_gid) < 0) { crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER); crm_exit(CRM_EX_NOUSER); } // Used by some resource agents if ((mkdir(CRM_STATE_DIR, 0750) < 0) && (errno != EEXIST)) { crm_warn("Could not create " CRM_STATE_DIR ": %s", pcmk_strerror(errno)); } else { mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); } /* Used to store core/blackbox/scheduler/cib files in */ crm_build_path(CRM_PACEMAKER_DIR, 0750); mcp_chown(CRM_PACEMAKER_DIR, pcmk_uid, pcmk_gid); /* Used to store core files in */ crm_build_path(CRM_CORE_DIR, 0750); mcp_chown(CRM_CORE_DIR, pcmk_uid, pcmk_gid); /* Used to store blackbox dumps in */ crm_build_path(CRM_BLACKBOX_DIR, 0750); mcp_chown(CRM_BLACKBOX_DIR, pcmk_uid, pcmk_gid); // Used to store scheduler inputs in crm_build_path(PE_STATE_DIR, 0750); mcp_chown(PE_STATE_DIR, pcmk_uid, pcmk_gid); /* Used to store the cluster configuration */ crm_build_path(CRM_CONFIG_DIR, 0750); mcp_chown(CRM_CONFIG_DIR, pcmk_uid, pcmk_gid); // Don't build CRM_RSCTMP_DIR, pacemaker-execd will do it ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks); if (ipcs == NULL) { crm_err("Couldn't start IPC server"); crm_exit(CRM_EX_OSERR); } /* Allows us to block shutdown */ if (cluster_connect_cfg(&local_nodeid) == FALSE) { crm_err("Couldn't connect to Corosync's CFG service"); crm_exit(CRM_EX_PROTOCOL); } if(pcmk_locate_sbd() > 0) { setenv("PCMK_watchdog", "true", 1); } else { setenv("PCMK_watchdog", "false", 1); } switch (find_and_track_existing_processes()) { case pcmk_rc_ok: break; case pcmk_rc_ipc_unauthorized: crm_exit(CRM_EX_CANTCREAT); default: crm_exit(CRM_EX_FATAL); }; cluster.destroy = mcp_cpg_destroy; cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver; cluster.cpg.cpg_confchg_fn = mcp_cpg_membership; crm_set_autoreap(FALSE); rc = pcmk_ok; if (cluster_connect_cpg(&cluster) == FALSE) { crm_err("Couldn't connect to Corosync's CPG service"); rc = -ENOPROTOOPT; } else if (cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy) == FALSE) { rc = -ENOTCONN; } else { local_name = get_local_node_name(); update_node_processes(local_nodeid, local_name, get_process_list()); mainloop_add_signal(SIGTERM, pcmk_shutdown); mainloop_add_signal(SIGINT, pcmk_shutdown); init_children_processes(); crm_notice("Pacemaker daemon successfully started and accepting connections"); g_main_loop_run(mainloop); } if (ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } g_main_loop_unref(mainloop); cluster_disconnect_cpg(&cluster); cluster_disconnect_cfg(); crm_exit(crm_errno2exit(rc)); } diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt index a6b4fdd2eb..58080b1557 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt @@ -1,481 +1,482 @@ :compat-mode: legacy = Cluster-Wide Configuration = == Configuration Layout == The cluster is defined by the Cluster Information Base (CIB), which uses XML notation. The simplest CIB, an empty one, looks like this: .An empty configuration ====== [source,XML] ------- ------- ====== The empty configuration above contains the major sections that make up a CIB: * +cib+: The entire CIB is enclosed with a +cib+ tag. Certain fundamental settings are defined as attributes of this tag. 
** +configuration+: This section -- the primary focus of this document -- contains traditional configuration information such as what resources the cluster serves and the relationships among them. *** +crm_config+: cluster-wide configuration options *** +nodes+: the machines that host the cluster *** +resources+: the services run by the cluster *** +constraints+: indications of how resources should be placed ** +status+: This section contains the history of each resource on each node. Based on this data, the cluster can construct the complete current state of the cluster. The authoritative source for this section is the local executor (pacemaker-execd process) on each cluster node, and the cluster will occasionally repopulate the entire section. For this reason, it is never written to disk, and administrators are advised against modifying it in any way. In this document, configuration settings will be described as 'properties' or 'options' based on how they are defined in the CIB: * Properties are XML attributes of an XML element. * Options are name-value pairs expressed as +nvpair+ child elements of an XML element. Normally, you will use command-line tools that abstract the XML, so the distinction will be unimportant; both properties and options are cluster settings you can tweak. == CIB Properties == Certain settings are defined by CIB properties (that is, attributes of the +cib+ tag) rather than with the rest of the cluster configuration in the +configuration+ section. The reason is simply a matter of parsing. These options are used by the configuration database which is, by design, mostly ignorant of the content it holds. So the decision was made to place them in an easy-to-find location. .CIB Properties [width="95%",cols="2m,<5",options="header",align="center"] |========================================================= |Field |Description | admin_epoch | indexterm:[Configuration Version,Cluster] indexterm:[Cluster,Option,Configuration Version] indexterm:[admin_epoch,Cluster Option] indexterm:[Cluster,Option,admin_epoch] When a node joins the cluster, the cluster performs a check to see which node has the best configuration. It asks the node with the highest (+admin_epoch+, +epoch+, +num_updates+) tuple to replace the configuration on all the nodes -- which makes setting them, and setting them correctly, very important. +admin_epoch+ is never modified by the cluster; you can use this to make the configurations on any inactive nodes obsolete. _Never set this value to zero_. In such cases, the cluster cannot tell the difference between your configuration and the "empty" one used when nothing is found on disk. | epoch | indexterm:[epoch,Cluster Option] indexterm:[Cluster,Option,epoch] The cluster increments this every time the configuration is updated (usually by the administrator). | num_updates | indexterm:[num_updates,Cluster Option] indexterm:[Cluster,Option,num_updates] The cluster increments this every time the configuration or status is updated (usually by the cluster) and resets it to 0 when epoch changes. | validate-with | indexterm:[validate-with,Cluster Option] indexterm:[Cluster,Option,validate-with] Determines the type of XML validation that will be done on the configuration. If set to +none+, the cluster will not verify that updates conform to the DTD (nor reject ones that don't). This option can be useful when operating a mixed-version cluster during an upgrade. 
|cib-last-written | indexterm:[cib-last-written,Cluster Property] indexterm:[Cluster,Property,cib-last-written] Indicates when the configuration was last written to disk. Maintained by the cluster; for informational purposes only. |have-quorum | indexterm:[have-quorum,Cluster Property] indexterm:[Cluster,Property,have-quorum] Indicates if the cluster has quorum. If false, this may mean that the cluster cannot start resources or fence other nodes (see +no-quorum-policy+ below). Maintained by the cluster. |dc-uuid | indexterm:[dc-uuid,Cluster Property] indexterm:[Cluster,Property,dc-uuid] Indicates which cluster node is the current leader. Used by the cluster when placing resources and determining the order of some events. Maintained by the cluster. |========================================================= [[s-cluster-options]] == Cluster Options == Cluster options, as you might expect, control how the cluster behaves when confronted with certain situations. They are grouped into sets within the +crm_config+ section, and, in advanced configurations, there may be more than one set. (This will be described later in the section on <> where we will show how to have the cluster use different sets of options during working hours than during weekends.) For now, we will describe the simple case where each option is present at most once. You can obtain an up-to-date list of cluster options, including their default values, by running the `man pacemaker-schedulerd` and `man pacemaker-controld` commands. .Cluster Options [width="95%",cols="5m,2,<11",options="header",align="center"] |========================================================= |Option |Default |Description | cluster-name | | indexterm:[cluster-name,Cluster Property] indexterm:[Cluster,Property,cluster-name] An (optional) name for the cluster as a whole. This is mostly for users' convenience for use as desired in administration, but this can be used in the Pacemaker configuration in <> (as the +#cluster-name+ <>). It may also be used by higher-level tools when displaying cluster information, and by certain resource agents (for example, the +ocf:heartbeat:GFS2+ agent stores the cluster name in filesystem meta-data). | dc-version | | indexterm:[dc-version,Cluster Property] indexterm:[Cluster,Property,dc-version] Version of Pacemaker on the cluster's DC. Determined automatically by the cluster. Often includes the hash which identifies the exact Git changeset it was built from. Used for diagnostic purposes. | cluster-infrastructure | | indexterm:[cluster-infrastructure,Cluster Property] indexterm:[Cluster,Property,cluster-infrastructure] The messaging stack on which Pacemaker is currently running. Determined automatically by the cluster. Used for informational and diagnostic purposes. | no-quorum-policy | stop a| indexterm:[no-quorum-policy,Cluster Option] indexterm:[Cluster,Option,no-quorum-policy] What to do when the cluster does not have quorum. Allowed values: * +ignore:+ continue all resource management * +freeze:+ continue resource management, but don't recover resources from nodes not in the affected partition * +stop:+ stop all resources in the affected cluster partition * +suicide:+ fence all nodes in the affected cluster partition | batch-limit | 0 | indexterm:[batch-limit,Cluster Option] indexterm:[Cluster,Option,batch-limit] The maximum number of actions that the cluster may execute in parallel across all nodes. The "correct" value will depend on the speed and load of your network and cluster nodes. 
If zero, the cluster will impose a dynamically calculated limit only when any node has high load. | migration-limit | -1 | indexterm:[migration-limit,Cluster Option] indexterm:[Cluster,Option,migration-limit] The number of <> actions that the cluster is allowed to execute in parallel on a node. A value of -1 means unlimited. | symmetric-cluster | TRUE | indexterm:[symmetric-cluster,Cluster Option] indexterm:[Cluster,Option,symmetric-cluster] Can all resources run on any node by default? | stop-all-resources | FALSE | indexterm:[stop-all-resources,Cluster Option] indexterm:[Cluster,Option,stop-all-resources] Should the cluster stop all resources? | stop-orphan-resources | TRUE | indexterm:[stop-orphan-resources,Cluster Option] indexterm:[Cluster,Option,stop-orphan-resources] Should deleted resources be stopped? This value takes precedence over +is-managed+ (i.e. even unmanaged resources will be stopped if deleted from the configuration when this value is TRUE). | stop-orphan-actions | TRUE | indexterm:[stop-orphan-actions,Cluster Option] indexterm:[Cluster,Option,stop-orphan-actions] Should deleted actions be cancelled? | start-failure-is-fatal | TRUE | indexterm:[start-failure-is-fatal,Cluster Option] indexterm:[Cluster,Option,start-failure-is-fatal] Should a failure to start a resource on a particular node prevent further start attempts on that node? If FALSE, the cluster will decide whether the same node is still eligible based on the resource's current failure count and +migration-threshold+ (see <>). | enable-startup-probes | TRUE | indexterm:[enable-startup-probes,Cluster Option] indexterm:[Cluster,Option,enable-startup-probes] Should the cluster check for active resources during startup? | maintenance-mode | FALSE | indexterm:[maintenance-mode,Cluster Option] indexterm:[Cluster,Option,maintenance-mode] Should the cluster refrain from monitoring, starting and stopping resources? | stonith-enabled | TRUE | indexterm:[stonith-enabled,Cluster Option] indexterm:[Cluster,Option,stonith-enabled] Should failed nodes and nodes with resources that can't be stopped be shot? If you value your data, set up a STONITH device and enable this. If true, or unset, the cluster will refuse to start resources unless one or more STONITH resources have been configured. If false, unresponsive nodes are immediately assumed to be running no resources, and resource takeover to online nodes starts without any further protection (which means _data loss_ if the unresponsive node still accesses shared storage, for example). See also the +requires+ meta-attribute in <>. | stonith-action | reboot | indexterm:[stonith-action,Cluster Option] indexterm:[Cluster,Option,stonith-action] Action to send to STONITH device. Allowed values are +reboot+ and +off+. The value +poweroff+ is also allowed, but is only used for legacy devices. | stonith-timeout | 60s | indexterm:[stonith-timeout,Cluster Option] indexterm:[Cluster,Option,stonith-timeout] How long to wait for STONITH actions (reboot, on, off) to complete | stonith-max-attempts | 10 | indexterm:[stonith-max-attempts,Cluster Option] indexterm:[Cluster,Option,stonith-max-attempts] How many times fencing can fail for a target before the cluster will no longer immediately re-attempt it. | stonith-watchdog-timeout | 0 | indexterm:[stonith-watchdog-timeout,Cluster Option] indexterm:[Cluster,Option,stonith-watchdog-timeout] If nonzero, rely on hardware watchdog self-fencing. If positive, assume unseen nodes self-fence within this much time. 
If negative, and the SBD_WATCHDOG_TIMEOUT environment variable is set, use twice that value. | concurrent-fencing | FALSE | indexterm:[concurrent-fencing,Cluster Option] indexterm:[Cluster,Option,concurrent-fencing] Is the cluster allowed to initiate multiple fence actions concurrently? | fence-reaction | stop | indexterm:[fence-reaction,Cluster Option] indexterm:[Cluster,Option,fence-reaction] How should a cluster node react if notified of its own fencing? A cluster node may receive notification of its own fencing if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Allowed values are +stop+ to attempt to immediately stop pacemaker and stay stopped, or +panic+ to attempt to immediately reboot the local node, falling back to stop on failure. The default is likely to be changed to +panic+ in a future release. '(since 2.0.3)' | priority-fencing-delay | | indexterm:[priority-fencing-delay,Cluster Option] indexterm:[Cluster,Option,priority-fencing-delay] Enforce specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. If all the nodes have equal priority, then any pcmk_delay_base/max configured for the corresponding fencing resources will be applied. Otherwise as long as it's set, even if to 0, it takes precedence over any configured pcmk_delay_base/max. By default, priority fencing -delay is disabled. +delay is disabled. '(since 2.0.4)' | cluster-delay | 60s | indexterm:[cluster-delay,Cluster Option] indexterm:[Cluster,Option,cluster-delay] Estimated maximum round-trip delay over the network (excluding action execution). If the DC requires an action to be executed on another node, it will consider the action failed if it does not get a response from the other node in this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. | dc-deadtime | 20s | indexterm:[dc-deadtime,Cluster Option] indexterm:[Cluster,Option,dc-deadtime] How long to wait for a response from other nodes during startup. The "correct" value will depend on the speed/load of your network and the type of switches used. | cluster-ipc-limit | 500 | indexterm:[cluster-ipc-limit,Cluster Option] indexterm:[Cluster,Option,cluster-ipc-limit] The maximum IPC message backlog before one cluster daemon will disconnect another. This is of use in large clusters, for which a good value is the number of resources in the cluster multiplied by the number of nodes. The default of 500 is also the minimum. Raise this if you see "Evicting client" messages for cluster daemon PIDs in the logs. | pe-error-series-max | -1 | indexterm:[pe-error-series-max,Cluster Option] indexterm:[Cluster,Option,pe-error-series-max] The number of PE inputs resulting in ERRORs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-warn-series-max | -1 | indexterm:[pe-warn-series-max,Cluster Option] indexterm:[Cluster,Option,pe-warn-series-max] The number of PE inputs resulting in WARNINGs to save. Used when reporting problems. A value of -1 means unlimited (report all). 
| pe-input-series-max | -1 | indexterm:[pe-input-series-max,Cluster Option] indexterm:[Cluster,Option,pe-input-series-max] The number of "normal" PE inputs to save. Used when reporting problems. A value of -1 means unlimited (report all). | placement-strategy | default | indexterm:[placement-strategy,Cluster Option] indexterm:[Cluster,Option,placement-strategy] How the cluster should allocate resources to nodes (see <>). Allowed values are +default+, +utilization+, +balanced+, and +minimal+. | node-health-strategy | none | indexterm:[node-health-strategy,Cluster Option] indexterm:[Cluster,Option,node-health-strategy] How the cluster should react to node health attributes (see <>). Allowed values are +none+, +migrate-on-red+, +only-green+, +progressive+, and +custom+. | enable-acl | FALSE | indexterm:[enable-acl,Cluster Option] indexterm:[Cluster,Option,enable-acl] Whether access control lists (ACLs) (see <>) can be used to authorize modifications to the CIB. | node-health-base | 0 | indexterm:[node-health-base,Cluster Option] indexterm:[Cluster,Option,node-health-base] The base health score assigned to a node. Only used when +node-health-strategy+ is +progressive+. | node-health-green | 0 | indexterm:[node-health-green,Cluster Option] indexterm:[Cluster,Option,node-health-green] The score to use for a node health attribute whose value is +green+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | node-health-yellow | 0 | indexterm:[node-health-yellow,Cluster Option] indexterm:[Cluster,Option,node-health-yellow] The score to use for a node health attribute whose value is +yellow+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | node-health-red | 0 | indexterm:[node-health-red,Cluster Option] indexterm:[Cluster,Option,node-health-red] The score to use for a node health attribute whose value is +red+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | cluster-recheck-interval | 15min | indexterm:[cluster-recheck-interval,Cluster Option] indexterm:[Cluster,Option,cluster-recheck-interval] Pacemaker is primarily event-driven, and looks ahead to know when to recheck the cluster for failure timeouts and most time-based rules. However, it will also recheck the cluster after this amount of inactivity. This has two goals: rules with +date_spec+ are only guaranteed to be checked this often, and it also serves as a fail-safe for certain classes of scheduler bugs. A value of 0 disables this polling; positive values are a time interval. | shutdown-lock | false | The default of false allows active resources to be recovered elsewhere when their node is cleanly shut down, which is what the vast majority of users will want. However, some users prefer to make resources highly available only for failures, with no recovery for clean shutdowns. If this option is true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the master role of promotable clones are currently never locked, though support could be added in a future release. 
Locks may be manually cleared using the `--refresh` option of `crm_resource` (both the resource and node must be specified; this works with remote nodes if their connection resource's target-role is set to Stopped, but not if Pacemaker Remote is stopped on the remote node without disabling the connection resource). +'(since 2.0.4)' indexterm:[shutdown-lock,Cluster Option] indexterm:[Cluster,Option,shutdown-lock] | shutdown-lock-limit | 0 | If shutdown-lock is true, and this is set to a nonzero time duration, locked resources will be allowed to start after this much time has passed since the node shutdown was initiated, even if the node has not rejoined. (This works with remote nodes only if their connection resource's target-role is set to -Stopped.) +Stopped.) '(since 2.0.4)' indexterm:[shutdown-lock-limit,Cluster Option] indexterm:[Cluster,Option,shutdown-lock-limit] | remove-after-stop | FALSE | indexterm:[remove-after-stop,Cluster Option] indexterm:[Cluster,Option,remove-after-stop] _Advanced Use Only:_ Should the cluster remove resources from the LRM after they are stopped? Values other than the default are, at best, poorly tested and potentially dangerous. | startup-fencing | TRUE | indexterm:[startup-fencing,Cluster Option] indexterm:[Cluster,Option,startup-fencing] _Advanced Use Only:_ Should the cluster shoot unseen nodes? Not using the default is very unsafe! | election-timeout | 2min | indexterm:[election-timeout,Cluster Option] indexterm:[Cluster,Option,election-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | shutdown-escalation | 20min | indexterm:[shutdown-escalation,Cluster Option] indexterm:[Cluster,Option,shutdown-escalation] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | join-integration-timeout | 3min | indexterm:[join-integration-timeout,Cluster Option] indexterm:[Cluster,Option,join-integration-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | join-finalization-timeout | 30min | indexterm:[join-finalization-timeout,Cluster Option] indexterm:[Cluster,Option,join-finalization-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | transition-delay | 0s | indexterm:[transition-delay,Cluster Option] indexterm:[Cluster,Option,transition-delay] _Advanced Use Only:_ Delay cluster recovery for the configured interval to allow for additional/related events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. Enabling this option will slow down cluster recovery under all conditions. |========================================================= diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h index 73ae4c7092..42f68d0d50 100644 --- a/include/crm/common/internal.h +++ b/include/crm/common/internal.h @@ -1,248 +1,269 @@ /* * Copyright 2015-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #ifndef CRM_COMMON_INTERNAL__H #define CRM_COMMON_INTERNAL__H #include // getpid() #include // bool #include // strcmp() #include // uid_t, gid_t, pid_t #include // guint, GList, GHashTable #include // xmlNode #include // crm_strdup_printf() // Internal ACL-related utilities (from acl.c) char *pcmk__uid2username(uid_t uid); const char *pcmk__update_acl_user(xmlNode *request, const char *field, const char *peer_user); #if ENABLE_ACL # include static inline bool pcmk__is_privileged(const char *user) { return user && (!strcmp(user, CRM_DAEMON_USER) || !strcmp(user, "root")); } #endif #if SUPPORT_CIBSECRETS // Internal CIB utilities (from cib_secrets.c) */ int pcmk__substitute_secrets(const char *rsc_id, GHashTable *params); #endif /* internal digest-related utilities (from digest.c) */ bool pcmk__verify_digest(xmlNode *input, const char *expected); /* internal I/O utilities (from io.c) */ int pcmk__real_path(const char *path, char **resolved_path); char *pcmk__series_filename(const char *directory, const char *series, int sequence, bool bzip); int pcmk__read_series_sequence(const char *directory, const char *series, unsigned int *seq); void pcmk__write_series_sequence(const char *directory, const char *series, unsigned int sequence, int max); int pcmk__chown_series_sequence(const char *directory, const char *series, uid_t uid, gid_t gid); bool pcmk__daemon_can_write(const char *dir, const char *file); void pcmk__sync_directory(const char *name); int pcmk__file_contents(const char *filename, char **contents); int pcmk__write_sync(int fd, const char *contents); int pcmk__set_nonblocking(int fd); const char *pcmk__get_tmpdir(void); void pcmk__close_fds_in_child(bool); +/*! + * \internal + * \brief Open /dev/null to consume next available file descriptor + * + * Open /dev/null, disregarding the result. This is intended when daemonizing to + * be able to null stdin, stdout, and stderr. + * + * \param[in] flags O_RDONLY (stdin) or O_WRONLY (stdout and stderr) + */ +static inline void +pcmk__open_devnull(int flags) +{ + // Static analysis clutter + // cppcheck-suppress leakReturnValNotUsed + (void) open("/dev/null", flags); +} + /* internal logging utilities */ # define pcmk__config_err(fmt...) do { \ crm_config_error = TRUE; \ crm_err(fmt); \ } while (0) # define pcmk__config_warn(fmt...) do { \ crm_config_warning = TRUE; \ crm_warn(fmt); \ } while (0) /* internal procfs utilities (from procfs.c) */ pid_t pcmk__procfs_pid_of(const char *name); unsigned int pcmk__procfs_num_cores(void); /* internal XML schema functions (from xml.c) */ void crm_schema_init(void); void crm_schema_cleanup(void); /* internal functions related to process IDs (from pid.c) */ /*! * \internal * \brief Check whether process exists (by PID and optionally executable path) * * \param[in] pid PID of process to check * \param[in] daemon If not NULL, path component to match with procfs entry * * \return Standard Pacemaker return code * \note Particular return codes of interest include pcmk_rc_ok for alive, * ESRCH for process is not alive (verified by kill and/or executable path * match), EACCES for caller unable or not allowed to check. A result of * "alive" is less reliable when \p daemon is not provided or procfs is * not available, since there is no guarantee that the PID has not been * recycled for another process. * \note This function cannot be used to verify \e authenticity of the process. 
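 *
 * A minimal usage sketch (illustrative only; the daemon name and PID below
 * are assumed values for the example, not taken from real callers):
 * \code
 *     pid_t pid = 1234;    // assumed PID to check
 *     int rc = pcmk__pid_active(pid, "pacemaker-execd");
 *
 *     if (rc == pcmk_rc_ok) {
 *         // alive (and matching the given name where procfs allows checking)
 *     } else if (rc == ESRCH) {
 *         // not alive
 *     } else {
 *         // EACCES or another code: liveness could not be determined
 *     }
 * \endcode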
*/ int pcmk__pid_active(pid_t pid, const char *daemon); int pcmk__read_pidfile(const char *filename, pid_t *pid); int pcmk__pidfile_matches(const char *filename, pid_t expected_pid, const char *expected_name, pid_t *pid); int pcmk__lock_pidfile(const char *filename, const char *name); /* interal functions related to resource operations (from operations.c) */ // printf-style format to create operation ID from resource, action, interval #define PCMK__OP_FMT "%s_%s_%u" char *pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms); char *pcmk__notify_key(const char *rsc_id, const char *notify_type, const char *op_type); char *pcmk__transition_key(int transition_id, int action_id, int target_rc, const char *node); void pcmk__filter_op_for_digest(xmlNode *param_set); // miscellaneous utilities (from utils.c) const char *pcmk_message_name(const char *name); +extern int pcmk__score_red; +extern int pcmk__score_green; +extern int pcmk__score_yellow; + /* internal generic string functions (from strings.c) */ int pcmk__guint_from_hash(GHashTable *table, const char *key, guint default_val, guint *result); bool pcmk__starts_with(const char *str, const char *prefix); bool pcmk__ends_with(const char *s, const char *match); bool pcmk__ends_with_ext(const char *s, const char *match); char *pcmk__add_word(char *list, const char *word); int pcmk__compress(const char *data, unsigned int length, unsigned int max, char **result, unsigned int *result_len); /* Correctly displaying singular or plural is complicated; consider "1 node has" * vs. "2 nodes have". A flexible solution is to pluralize entire strings, e.g. * * if (a == 1) { * crm_info("singular message"): * } else { * crm_info("plural message"); * } * * though even that's not sufficient for all languages besides English (if we * ever desire to do translations of output and log messages). But the following * convenience macros are "good enough" and more concise for many cases. */ /* Example: * crm_info("Found %d %s", nentries, * pcmk__plural_alt(nentries, "entry", "entries")); */ #define pcmk__plural_alt(i, s1, s2) (((i) == 1)? (s1) : (s2)) // Example: crm_info("Found %d node%s", nnodes, pcmk__plural_s(nnodes)); #define pcmk__plural_s(i) pcmk__plural_alt(i, "", "s") static inline int pcmk__str_empty(const char *s) { return (s == NULL) || (s[0] == '\0'); } static inline char * pcmk__getpid_s() { return crm_strdup_printf("%lu", (unsigned long) getpid()); } // More efficient than g_list_length(list) == 1 static inline bool pcmk__list_of_1(GList *list) { return list && (list->next == NULL); } // More efficient than g_list_length(list) > 1 static inline bool pcmk__list_of_multiple(GList *list) { return list && (list->next != NULL); } /* convenience functions for failure-related node attributes */ #define PCMK__FAIL_COUNT_PREFIX "fail-count" #define PCMK__LAST_FAILURE_PREFIX "last-failure" /*! * \internal * \brief Generate a failure-related node attribute name for a resource * * \param[in] prefix Start of attribute name * \param[in] rsc_id Resource name * \param[in] op Operation name * \param[in] interval_ms Operation interval * * \return Newly allocated string with attribute name * * \note Failure attributes are named like PREFIX-RSC#OP_INTERVAL (for example, * "fail-count-myrsc#monitor_30000"). The '#' is used because it is not * a valid character in a resource ID, to reliably distinguish where the * operation name begins. The '_' is used simply to be more comparable to * action labels like "myrsc_monitor_30000". 
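 *
 * A brief illustration (the resource and operation names are made up for the
 * example; the results are allocated and must be freed by the caller):
 * \code
 *     char *fail = pcmk__failcount_name("myrsc", "monitor", 30000);
 *     // fail is now "fail-count-myrsc#monitor_30000"
 *
 *     char *last = pcmk__lastfailure_name("myrsc", "monitor", 30000);
 *     // last is now "last-failure-myrsc#monitor_30000"
 *
 *     free(fail);
 *     free(last);
 * \endcode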
*/ static inline char * pcmk__fail_attr_name(const char *prefix, const char *rsc_id, const char *op, guint interval_ms) { CRM_CHECK(prefix && rsc_id && op, return NULL); return crm_strdup_printf("%s-%s#%s_%u", prefix, rsc_id, op, interval_ms); } static inline char * pcmk__failcount_name(const char *rsc_id, const char *op, guint interval_ms) { return pcmk__fail_attr_name(PCMK__FAIL_COUNT_PREFIX, rsc_id, op, interval_ms); } static inline char * pcmk__lastfailure_name(const char *rsc_id, const char *op, guint interval_ms) { return pcmk__fail_attr_name(PCMK__LAST_FAILURE_PREFIX, rsc_id, op, interval_ms); } #endif /* CRM_COMMON_INTERNAL__H */ diff --git a/include/crm/common/logging.h b/include/crm/common/logging.h index 97ad261c00..9b0bc06380 100644 --- a/include/crm/common/logging.h +++ b/include/crm/common/logging.h @@ -1,412 +1,422 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Wrappers for and extensions to libqb logging * \ingroup core */ #ifndef CRM_LOGGING__H # define CRM_LOGGING__H # include # include /* Define custom log priorities. * * syslog(3) uses int for priorities, but libqb's struct qb_log_callsite uses * uint8_t, so make sure they fit in the latter. */ // Define something even less desired than debug # ifndef LOG_TRACE # define LOG_TRACE (LOG_DEBUG+1) # endif // Print message to stdout instead of logging it # ifndef LOG_STDOUT # define LOG_STDOUT 254 # endif // Don't send message anywhere # ifndef LOG_NEVER # define LOG_NEVER 255 # endif /* "Extended information" logging support */ #ifdef QB_XS # define CRM_XS QB_XS # define crm_extended_logging(t, e) qb_log_ctl((t), QB_LOG_CONF_EXTENDED, (e)) #else # define CRM_XS "|" /* A caller might want to check the return value, so we can't define this as a * no-op, and we can't simply define it to be 0 because gcc will then complain * when the value isn't checked. 
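 *
 * Shape of a typical call, shown for illustration only (the target and flag
 * values here are assumed for the example, not taken from this header):
 * \code
 *     int rc = crm_extended_logging(QB_LOG_SYSLOG, QB_TRUE);
 *     // with this fallback, rc is always 0 and nothing else happens
 * \endcode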
*/ static inline int crm_extended_logging(int t, int e) { return 0; } #endif extern unsigned int crm_log_level; extern gboolean crm_config_error; extern gboolean crm_config_warning; extern unsigned int crm_trace_nonlog; enum xml_log_options { xml_log_option_filtered = 0x0001, xml_log_option_formatted = 0x0002, xml_log_option_text = 0x0004, /* add this option to dump text into xml */ xml_log_option_full_fledged = 0x0008, /* crm_xml_dump: serialize using libxml */ xml_log_option_diff_plus = 0x0010, xml_log_option_diff_minus = 0x0020, xml_log_option_diff_short = 0x0040, xml_log_option_diff_all = 0x0100, xml_log_option_dirty_add = 0x1000, xml_log_option_open = 0x2000, xml_log_option_children = 0x4000, xml_log_option_close = 0x8000, }; void crm_enable_blackbox(int nsig); void crm_disable_blackbox(int nsig); void crm_write_blackbox(int nsig, struct qb_log_callsite *callsite); void crm_update_callsites(void); void crm_log_deinit(void); gboolean crm_log_cli_init(const char *entity); void crm_log_preinit(const char *entity, int argc, char **argv); gboolean crm_log_init(const char *entity, uint8_t level, gboolean daemon, gboolean to_stderr, int argc, char **argv, gboolean quiet); void crm_log_args(int argc, char **argv); void crm_log_output_fn(const char *file, const char *function, int line, int level, const char *prefix, const char *output); // Log a block of text line by line # define crm_log_output(level, prefix, output) crm_log_output_fn(__FILE__, __FUNCTION__, __LINE__, level, prefix, output) gboolean crm_add_logfile(const char *filename); void crm_bump_log_level(int argc, char **argv); void crm_enable_stderr(int enable); gboolean crm_is_callsite_active(struct qb_log_callsite *cs, uint8_t level, uint32_t tags); void log_data_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, gboolean formatted); /* returns the old value */ unsigned int set_crm_log_level(unsigned int level); unsigned int get_crm_log_level(void); /* * Throughout the macros below, note the leading, pre-comma, space in the * various ' , ##args' occurrences to aid portability across versions of 'gcc'. * https://gcc.gnu.org/onlinedocs/cpp/Variadic-Macros.html#Variadic-Macros */ #if defined(__clang__) # define CRM_TRACE_INIT_DATA(name) # else # include // required by QB_LOG_INIT_DATA() macro # define CRM_TRACE_INIT_DATA(name) QB_LOG_INIT_DATA(name) #endif /* Using "switch" instead of "if" in these macro definitions keeps * static analysis from complaining about constant evaluations */ /*! * \brief Log a message * * \param[in] level Priority at which to log the message * \param[in] fmt printf-style format string literal for message * \param[in] args Any arguments needed by format string * * \note This is a macro, and \p level may be evaluated more than once. */ # define do_crm_log(level, fmt, args...) do { \ switch (level) { \ case LOG_STDOUT: \ printf(fmt "\n" , ##args); \ break; \ case LOG_NEVER: \ break; \ default: \ qb_log_from_external_source(__func__, __FILE__, fmt, \ (level), __LINE__, 0 , ##args); \ break; \ } \ } while (0) /*! * \brief Log a message that is likely to be filtered out * * \param[in] level Priority at which to log the message * \param[in] fmt printf-style format string for message * \param[in] args Any arguments needed by format string * * \note This is a macro, and \p level may be evaluated more than once. * This does nothing when level is LOG_STDOUT. */ # define do_crm_log_unlikely(level, fmt, args...) 
do { \ switch (level) { \ case LOG_STDOUT: case LOG_NEVER: \ break; \ default: { \ static struct qb_log_callsite *trace_cs = NULL; \ if (trace_cs == NULL) { \ trace_cs = qb_log_callsite_get(__func__, __FILE__, fmt, \ (level), __LINE__, 0); \ } \ if (crm_is_callsite_active(trace_cs, (level), 0)) { \ qb_log_from_external_source(__func__, __FILE__, fmt, \ (level), __LINE__, 0 , ##args); \ } \ } \ break; \ } \ } while (0) /*! * \internal * \brief Execute code depending on whether message would be logged * * This is similar to do_crm_log_unlikely() except instead of logging, it either * continues past this statement or executes else_action depending on whether a * message of the given severity would be logged or not. This allows whole * blocks of code to be skipped if tracing or debugging is turned off. * * \param[in] level Severity at which to continue past this statement * \param[in] else_action Code block to execute if severity would not be logged * * \note else_action must not contain a break or continue statement */ # define pcmk__log_else(level, else_action) do { \ static struct qb_log_callsite *trace_cs = NULL; \ \ if (trace_cs == NULL) { \ trace_cs = qb_log_callsite_get(__func__, __FILE__, "log_else", \ level, __LINE__, 0); \ } \ if (!crm_is_callsite_active(trace_cs, level, 0)) { \ else_action; \ } \ } while(0) # define CRM_LOG_ASSERT(expr) do { \ if(__unlikely((expr) == FALSE)) { \ static struct qb_log_callsite *core_cs = NULL; \ if(core_cs == NULL) { \ core_cs = qb_log_callsite_get(__func__, __FILE__, "log-assert", LOG_TRACE, __LINE__, 0); \ } \ crm_abort(__FILE__, __FUNCTION__, __LINE__, #expr, \ core_cs?core_cs->targets:FALSE, TRUE); \ } \ } while(0) /* 'failure_action' MUST NOT be 'continue' as it will apply to the * macro's do-while loop */ # define CRM_CHECK(expr, failure_action) do { \ if(__unlikely((expr) == FALSE)) { \ static struct qb_log_callsite *core_cs = NULL; \ if(core_cs == NULL) { \ core_cs = qb_log_callsite_get(__func__, __FILE__, "check-assert", LOG_TRACE, __LINE__, 0); \ } \ crm_abort(__FILE__, __FUNCTION__, __LINE__, #expr, \ core_cs?core_cs->targets:FALSE, TRUE); \ failure_action; \ } \ } while(0) /*! * \brief Log XML line-by-line in a formatted fashion * * \param[in] level Priority at which to log the messages * \param[in] text Prefix for each line * \param[in] xml XML to log * * \note This is a macro, and \p level may be evaluated more than once. * This does nothing when level is LOG_STDOUT. */ # define do_crm_log_xml(level, text, xml) do { \ switch (level) { \ case LOG_STDOUT: case LOG_NEVER: \ break; \ default: { \ static struct qb_log_callsite *xml_cs = NULL; \ if (xml_cs == NULL) { \ xml_cs = qb_log_callsite_get(__func__, __FILE__, \ "xml-blob", (level), __LINE__, 0); \ } \ if (crm_is_callsite_active(xml_cs, (level), 0)) { \ log_data_element((level), __FILE__, __FUNCTION__, \ __LINE__, text, xml, 1, xml_log_option_formatted); \ } \ } \ break; \ } \ } while(0) /*! * \brief Log a message as if it came from a different code location * * \param[in] level Priority at which to log the message * \param[in] file Source file name to use instead of __FILE__ * \param[in] function Source function name to use instead of __func__ * \param[in] line Source line number to use instead of __line__ * \param[in] fmt printf-style format string literal for message * \param[in] args Any arguments needed by format string * * \note This is a macro, and \p level may be evaluated more than once. */ # define do_crm_log_alias(level, file, function, line, fmt, args...) 
do { \ switch (level) { \ case LOG_STDOUT: \ printf(fmt "\n" , ##args); \ break; \ case LOG_NEVER: \ break; \ default: \ qb_log_from_external_source(function, file, fmt, (level), \ line, 0 , ##args); \ break; \ } \ } while (0) -/*! - * \brief Log a message using constant priority - * - * \param[in] level Priority at which to log the message - * \param[in] fmt printf-style format string literal for message - * \param[in] args Any arguments needed by format string - * - * \note This is a macro, and \p level may be evaluated more than once. - * This does nothing when level is LOG_STDOUT. - */ -# define do_crm_log_always(level, fmt, args...) do { \ - switch (level) { \ - case LOG_STDOUT: case LOG_NEVER: \ - break; \ - default: \ - qb_log((level), fmt , ##args); \ - break; \ - } \ - } while (0) - /*! * \brief Send a system error message to both the log and stderr * * \param[in] level Priority at which to log the message * \param[in] fmt printf-style format string for message * \param[in] args Any arguments needed by format string * * \deprecated One of the other logging functions should be used with * pcmk_strerror() instead. * \note This is a macro, and \p level may be evaluated more than once. * \note Because crm_perror() adds the system error message and error number * onto the end of fmt, that information will become extended information * if CRM_XS is used inside fmt and will not show up in syslog. */ # define crm_perror(level, fmt, args...) do { \ switch (level) { \ case LOG_NEVER: \ break; \ default: { \ const char *err = strerror(errno); \ /* cast to int makes coverity happy when level == 0 */ \ if ((level) <= (int) crm_log_level) { \ fprintf(stderr, fmt ": %s (%d)\n" , ##args, err, errno);\ } \ do_crm_log((level), fmt ": %s (%d)" , ##args, err, errno); \ } \ break; \ } \ } while (0) /*! * \brief Log a message with a tag (for use with PCMK_trace_tags) * * \param[in] level Priority at which to log the message * \param[in] tag String to tag message with * \param[in] fmt printf-style format string for message * \param[in] args Any arguments needed by format string * * \note This is a macro, and \p level may be evaluated more than once. * This does nothing when level is LOG_STDOUT. */ # define crm_log_tag(level, tag, fmt, args...) do { \ switch (level) { \ case LOG_STDOUT: case LOG_NEVER: \ break; \ default: { \ static struct qb_log_callsite *trace_tag_cs = NULL; \ int converted_tag = g_quark_try_string(tag); \ if (trace_tag_cs == NULL) { \ trace_tag_cs = qb_log_callsite_get(__func__, __FILE__, \ fmt, (level), __LINE__, converted_tag); \ } \ if (crm_is_callsite_active(trace_tag_cs, (level), \ converted_tag)) { \ qb_log_from_external_source(__func__, __FILE__, fmt, \ (level), __LINE__, converted_tag , ##args); \ } \ } \ } \ } while (0) +# define crm_emerg(fmt, args...) qb_log(LOG_EMERG, fmt , ##args) # define crm_crit(fmt, args...) qb_logt(LOG_CRIT, 0, fmt , ##args) # define crm_err(fmt, args...) qb_logt(LOG_ERR, 0, fmt , ##args) # define crm_warn(fmt, args...) qb_logt(LOG_WARNING, 0, fmt , ##args) # define crm_notice(fmt, args...) qb_logt(LOG_NOTICE, 0, fmt , ##args) # define crm_info(fmt, args...) qb_logt(LOG_INFO, 0, fmt , ##args) # define crm_debug(fmt, args...) do_crm_log_unlikely(LOG_DEBUG, fmt , ##args) # define crm_trace(fmt, args...) 
do_crm_log_unlikely(LOG_TRACE, fmt , ##args) # define crm_log_xml_crit(xml, text) do_crm_log_xml(LOG_CRIT, text, xml) # define crm_log_xml_err(xml, text) do_crm_log_xml(LOG_ERR, text, xml) # define crm_log_xml_warn(xml, text) do_crm_log_xml(LOG_WARNING, text, xml) # define crm_log_xml_notice(xml, text) do_crm_log_xml(LOG_NOTICE, text, xml) # define crm_log_xml_info(xml, text) do_crm_log_xml(LOG_INFO, text, xml) # define crm_log_xml_debug(xml, text) do_crm_log_xml(LOG_DEBUG, text, xml) # define crm_log_xml_trace(xml, text) do_crm_log_xml(LOG_TRACE, text, xml) # define crm_log_xml_explicit(xml, text) do { \ static struct qb_log_callsite *digest_cs = NULL; \ digest_cs = qb_log_callsite_get( \ __func__, __FILE__, text, LOG_TRACE, __LINE__, \ crm_trace_nonlog); \ if (digest_cs && digest_cs->targets) { \ do_crm_log_xml(LOG_TRACE, text, xml); \ } \ } while(0) # define crm_str(x) (const char*)(x?x:"") +#ifndef PCMK__NO_COMPAT + +/* Everything here is deprecated and kept only for public API backward + * compatibility. It will be moved to compatibility.h when 2.1.0 is released. + */ + +/*! + * \brief Log a message using constant priority + * + * \param[in] level Priority at which to log the message + * \param[in] fmt printf-style format string literal for message + * \param[in] args Any arguments needed by format string + * + * \deprecated Use one of the other logging functions instead + * \note This is a macro, and \p level may be evaluated more than once. + * This does nothing when level is LOG_STDOUT. + */ +# define do_crm_log_always(level, fmt, args...) do { \ + switch (level) { \ + case LOG_STDOUT: case LOG_NEVER: \ + break; \ + default: \ + qb_log((level), fmt , ##args); \ + break; \ + } \ + } while (0) + +#endif // PCMK__NO_COMPAT + #ifdef __cplusplus } #endif #endif diff --git a/include/crm/lrmd_internal.h b/include/crm/lrmd_internal.h index bc48fe4226..498a9baf2f 100644 --- a/include/crm/lrmd_internal.h +++ b/include/crm/lrmd_internal.h @@ -1,33 +1,69 @@ /* - * Copyright 2015-2019 the Pacemaker project contributors + * Copyright 2015-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #ifndef LRMD_INTERNAL__H #define LRMD_INTERNAL__H #include // uint32_t -#include // GList +#include // GList, GHashTable, gpointer #include // xmlNode +#include // crm_ipc_t +#include // mainloop_io_t, ipc_client_callbacks #include // pcmk__remote_t #include // lrmd_t, lrmd_event_data_t int lrmd_send_attribute_alert(lrmd_t *lrmd, GList *alert_list, const char *node, uint32_t nodeid, const char *attr_name, const char *attr_value); int lrmd_send_node_alert(lrmd_t *lrmd, GList *alert_list, const char *node, uint32_t nodeid, const char *state); int lrmd_send_fencing_alert(lrmd_t *lrmd, GList *alert_list, const char *target, const char *task, const char *desc, int op_rc); int lrmd_send_resource_alert(lrmd_t *lrmd, GList *alert_list, const char *node, lrmd_event_data_t *op); int lrmd_tls_send_msg(pcmk__remote_t *session, xmlNode *msg, uint32_t id, const char *msg_type); +/* Shared functions for IPC proxy back end */ + +typedef struct remote_proxy_s { + char *node_name; + char *session_id; + + gboolean is_local; + + crm_ipc_t *ipc; + mainloop_io_t *source; + uint32_t last_request_id; + lrmd_t *lrm; + +} remote_proxy_t; + +remote_proxy_t *remote_proxy_new(lrmd_t *lrmd, + struct ipc_client_callbacks *proxy_callbacks, + const char *node_name, const char *session_id, + const char *channel); + +int remote_proxy_check(lrmd_t *lrmd, GHashTable *hash); +void remote_proxy_cb(lrmd_t *lrmd, const char *node_name, xmlNode *msg); +void remote_proxy_ack_shutdown(lrmd_t *lrmd); +void remote_proxy_nack_shutdown(lrmd_t *lrmd); + +int remote_proxy_dispatch(const char *buffer, ssize_t length, + gpointer userdata); +void remote_proxy_disconnected(gpointer data); +void remote_proxy_free(gpointer data); + +void remote_proxy_relay_event(remote_proxy_t *proxy, xmlNode *msg); +void remote_proxy_relay_response(remote_proxy_t *proxy, xmlNode *msg, + int msg_id); + #endif diff --git a/include/crm_internal.h b/include/crm_internal.h index 93514e3dc0..51b16053a7 100644 --- a/include/crm_internal.h +++ b/include/crm_internal.h @@ -1,212 +1,174 @@ /* * Copyright 2006-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef CRM_INTERNAL__H # define CRM_INTERNAL__H # include # include # include # include # include /* Public API headers can guard deprecated code with this symbol, thus * preventing internal code (which includes this header) from using it, while * still allowing external code (which can't include this header) to use it, * for backward compatibility. 
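A hedged sketch of how a back end might wire the remote proxy declarations above together; the node name, session id, and channel are placeholders, and the callback struct mirrors the one cib_native.c builds later in this patch:

#include <crm/lrmd_internal.h>

/* Route IPC traffic through the shared dispatch/teardown helpers */
static struct ipc_client_callbacks proxy_callbacks = {
    .dispatch = remote_proxy_dispatch,
    .destroy  = remote_proxy_disconnected,
};

static remote_proxy_t *
start_proxy(lrmd_t *lrmd, const char *node, const char *session)
{
    /* remote_proxy_new() connects the named IPC channel and fills in the
     * bookkeeping struct (node_name, session_id, ipc, source, lrm) */
    return remote_proxy_new(lrmd, &proxy_callbacks, node, session,
                            "example-channel" /* placeholder channel name */);
}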
*/ #define PCMK__NO_COMPAT # include # include # include # include # include -/* Dynamic loading of libraries */ -void *find_library_function(void **handle, const char *lib, const char *fn, int fatal); - -/* char2score */ -extern int node_score_red; -extern int node_score_green; -extern int node_score_yellow; - /* Assorted convenience functions */ void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile); static inline long long crm_clear_bit(const char *function, int line, const char *target, long long word, long long bit) { long long rc = (word & ~bit); if (rc == word) { /* Unchanged */ } else if (target) { crm_trace("Bit 0x%.8llx for %s cleared by %s:%d", bit, target, function, line); } else { crm_trace("Bit 0x%.8llx cleared by %s:%d", bit, function, line); } return rc; } static inline long long crm_set_bit(const char *function, int line, const char *target, long long word, long long bit) { long long rc = (word | bit); if (rc == word) { /* Unchanged */ } else if (target) { crm_trace("Bit 0x%.8llx for %s set by %s:%d", bit, target, function, line); } else { crm_trace("Bit 0x%.8llx set by %s:%d", bit, function, line); } return rc; } # define set_bit(word, bit) word = crm_set_bit(__FUNCTION__, __LINE__, NULL, word, bit) # define clear_bit(word, bit) word = crm_clear_bit(__FUNCTION__, __LINE__, NULL, word, bit) char *generate_hash_key(const char *crm_msg_reference, const char *sys); void strip_text_nodes(xmlNode * xml); void pcmk_panic(const char *origin); pid_t pcmk_locate_sbd(void); /* * XML attribute names used only by internal code */ #define PCMK__XA_ATTR_DAMPENING "attr_dampening" #define PCMK__XA_ATTR_FORCE "attrd_is_force_write" #define PCMK__XA_ATTR_INTERVAL "attr_clear_interval" #define PCMK__XA_ATTR_IS_PRIVATE "attr_is_private" #define PCMK__XA_ATTR_IS_REMOTE "attr_is_remote" #define PCMK__XA_ATTR_NAME "attr_name" #define PCMK__XA_ATTR_NODE_ID "attr_host_id" #define PCMK__XA_ATTR_NODE_NAME "attr_host" #define PCMK__XA_ATTR_OPERATION "attr_clear_operation" #define PCMK__XA_ATTR_PATTERN "attr_regex" #define PCMK__XA_ATTR_RESOURCE "attr_resource" #define PCMK__XA_ATTR_SECTION "attr_section" #define PCMK__XA_ATTR_SET "attr_set" #define PCMK__XA_ATTR_USER "attr_user" #define PCMK__XA_ATTR_UUID "attr_key" #define PCMK__XA_ATTR_VALUE "attr_value" #define PCMK__XA_ATTR_VERSION "attr_version" #define PCMK__XA_ATTR_WRITER "attr_writer" #define PCMK__XA_MODE "mode" #define PCMK__XA_TASK "task" /* * IPC service names that are only used internally */ # define PCMK__SERVER_BASED_RO "cib_ro" # define PCMK__SERVER_BASED_RW "cib_rw" # define PCMK__SERVER_BASED_SHM "cib_shm" /* * IPC commands that can be sent to Pacemaker daemons */ #define PCMK__ATTRD_CMD_PEER_REMOVE "peer-remove" #define PCMK__ATTRD_CMD_UPDATE "update" #define PCMK__ATTRD_CMD_UPDATE_BOTH "update-both" #define PCMK__ATTRD_CMD_UPDATE_DELAY "update-delay" #define PCMK__ATTRD_CMD_QUERY "query" #define PCMK__ATTRD_CMD_REFRESH "refresh" #define PCMK__ATTRD_CMD_FLUSH "flush" #define PCMK__ATTRD_CMD_SYNC "sync" #define PCMK__ATTRD_CMD_SYNC_RESPONSE "sync-response" #define PCMK__ATTRD_CMD_CLEAR_FAILURE "clear-failure" /* * Environment variables used by Pacemaker */ #define PCMK__ENV_PHYSICAL_HOST "physical_host" # if SUPPORT_COROSYNC # include # include typedef struct qb_ipc_request_header cs_ipc_header_request_t; typedef struct qb_ipc_response_header cs_ipc_header_response_t; # else typedef struct { int size __attribute__ ((aligned(8))); int id __attribute__ ((aligned(8))); } __attribute__ ((aligned(8))) 
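A small sketch of the set_bit()/clear_bit() wrappers defined above, operating on a flags word; the flag values are hypothetical, and each real transition is traced with the caller's function and line:

#define DEMO_FLAG_DIRTY  0x01LL   /* hypothetical flags */
#define DEMO_FLAG_LIVE   0x02LL

static void
flag_demo(void)
{
    long long flags = 0;

    /* expands to: flags = crm_set_bit(__FUNCTION__, __LINE__, NULL, flags, ...) */
    set_bit(flags, DEMO_FLAG_DIRTY);

    if (flags & DEMO_FLAG_DIRTY) {
        clear_bit(flags, DEMO_FLAG_DIRTY);
    }

    /* Setting a bit that is already set leaves the word unchanged and is
     * deliberately not traced */
    set_bit(flags, DEMO_FLAG_LIVE);
    set_bit(flags, DEMO_FLAG_LIVE);
}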
cs_ipc_header_request_t; typedef struct { int size __attribute__ ((aligned(8))); int id __attribute__ ((aligned(8))); int error __attribute__ ((aligned(8))); } __attribute__ ((aligned(8))) cs_ipc_header_response_t; # endif static inline void * realloc_safe(void *ptr, size_t size) { void *new_ptr; // realloc(p, 0) can replace free(p) but this wrapper can't CRM_ASSERT(size > 0); new_ptr = realloc(ptr, size); if (new_ptr == NULL) { free(ptr); abort(); } return new_ptr; } const char *crm_xml_add_last_written(xmlNode *xml_node); void crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth); void crm_buffer_add_char(char **buffer, int *offset, int *max, char c); -/* IPC Proxy Backend Shared Functions */ -typedef struct remote_proxy_s { - char *node_name; - char *session_id; - - gboolean is_local; - - crm_ipc_t *ipc; - mainloop_io_t *source; - uint32_t last_request_id; - lrmd_t *lrm; - -} remote_proxy_t; - -remote_proxy_t *remote_proxy_new( - lrmd_t *lrmd, struct ipc_client_callbacks *proxy_callbacks, - const char *node_name, const char *session_id, const char *channel); - -int remote_proxy_check(lrmd_t *lrmd, GHashTable *hash); -void remote_proxy_cb(lrmd_t *lrmd, const char *node_name, xmlNode *msg); -void remote_proxy_ack_shutdown(lrmd_t *lrmd); -void remote_proxy_nack_shutdown(lrmd_t *lrmd); - -int remote_proxy_dispatch(const char *buffer, ssize_t length, gpointer userdata); -void remote_proxy_disconnected(gpointer data); -void remote_proxy_free(gpointer data); - -void remote_proxy_relay_event(remote_proxy_t *proxy, xmlNode *msg); -void remote_proxy_relay_response(remote_proxy_t *proxy, xmlNode *msg, int msg_id); - #endif /* CRM_INTERNAL__H */ diff --git a/lib/cib/cib_file.c b/lib/cib/cib_file.c index 9ee8b1cdbf..87471b9793 100644 --- a/lib/cib/cib_file.c +++ b/lib/cib/cib_file.c @@ -1,871 +1,871 @@ /* * Original copyright 2004 International Business Machines * Later changes copyright 2008-2020 the Pacemaker project contributors * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CIB_FLAG_DIRTY 0x00001 #define CIB_FLAG_LIVE 0x00002 typedef struct cib_file_opaque_s { int flags; char *filename; } cib_file_opaque_t; int cib_file_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options); int cib_file_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name); int cib_file_signon(cib_t * cib, const char *name, enum cib_conn_type type); int cib_file_signoff(cib_t * cib); int cib_file_free(cib_t * cib); static int cib_file_inputfd(cib_t * cib) { return -EPROTONOSUPPORT; } static int cib_file_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data)) { return -EPROTONOSUPPORT; } static int cib_file_register_notification(cib_t * cib, const char *callback, int enabled) { return -EPROTONOSUPPORT; } /*! 
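One point worth calling out in crm_internal.h above: realloc_safe() never returns NULL, because allocation failure frees the old buffer and aborts, and a zero size is rejected by assertion so it cannot double as free(). A hypothetical caller (assuming crm_internal.h is included) therefore skips the usual NULL check:

static char *
append_char(char *buf, size_t *len, char c)
{
    buf = realloc_safe(buf, *len + 2);   /* grows the buffer or aborts */
    buf[*len] = c;
    buf[*len + 1] = '\0';
    (*len)++;
    return buf;
}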
* \internal * \brief Compare the calculated digest of an XML tree against a signature file * * \param[in] root Root of XML tree to compare * \param[in] sigfile Name of signature file containing digest to compare * * \return TRUE if digests match or signature file does not exist, else FALSE */ static gboolean cib_file_verify_digest(xmlNode *root, const char *sigfile) { gboolean passed = FALSE; char *expected; int rc = pcmk__file_contents(sigfile, &expected); switch (rc) { case pcmk_rc_ok: if (expected == NULL) { crm_err("On-disk digest at %s is empty", sigfile); return FALSE; } break; case ENOENT: crm_warn("No on-disk digest present at %s", sigfile); return TRUE; default: crm_err("Could not read on-disk digest from %s: %s", sigfile, pcmk_rc_str(rc)); return FALSE; } passed = pcmk__verify_digest(root, expected); free(expected); return passed; } /*! * \internal * \brief Read an XML tree from a file and verify its digest * * \param[in] filename Name of XML file to read * \param[in] sigfile Name of signature file containing digest to compare * \param[in] root If non-NULL, will be set to pointer to parsed XML tree * * \return 0 if file was successfully read, parsed and verified, otherwise: * -errno on stat() failure, * -pcmk_err_cib_corrupt if file size is 0 or XML is not parseable, or * -pcmk_err_cib_modified if digests do not match * \note If root is non-NULL, it is the caller's responsibility to free *root on * successful return. */ int cib_file_read_and_verify(const char *filename, const char *sigfile, xmlNode **root) { int s_res; struct stat buf; char *local_sigfile = NULL; xmlNode *local_root = NULL; CRM_ASSERT(filename != NULL); if (root) { *root = NULL; } /* Verify that file exists and its size is nonzero */ s_res = stat(filename, &buf); if (s_res < 0) { crm_perror(LOG_WARNING, "Could not verify cluster configuration file %s", filename); return -errno; } else if (buf.st_size == 0) { crm_warn("Cluster configuration file %s is corrupt (size is zero)", filename); return -pcmk_err_cib_corrupt; } /* Parse XML */ local_root = filename2xml(filename); if (local_root == NULL) { crm_warn("Cluster configuration file %s is corrupt (unparseable as XML)", filename); return -pcmk_err_cib_corrupt; } /* If sigfile is not specified, use original file name plus .sig */ if (sigfile == NULL) { sigfile = local_sigfile = crm_strdup_printf("%s.sig", filename); } /* Verify that digests match */ if (cib_file_verify_digest(local_root, sigfile) == FALSE) { free(local_sigfile); free_xml(local_root); return -pcmk_err_cib_modified; } free(local_sigfile); if (root) { *root = local_root; } else { free_xml(local_root); } return pcmk_ok; } #define CIB_SERIES "cib" #define CIB_SERIES_MAX 100 #define CIB_SERIES_BZIP FALSE /* Must be false because archived copies are created with hard links */ #define CIB_LIVE_NAME CIB_SERIES ".xml" /*! 
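A hedged sketch of a caller of cib_file_read_and_verify(); the path is a placeholder. Passing a NULL sigfile makes the function derive "<filename>.sig" itself, and a non-NULL root pointer must be freed by the caller on success:

static int
load_verified_cib(const char *path)
{
    xmlNode *cib = NULL;
    int rc = cib_file_read_and_verify(path, NULL, &cib);

    if (rc != pcmk_ok) {
        /* rc is -errno, -pcmk_err_cib_corrupt, or -pcmk_err_cib_modified */
        crm_err("Could not load %s: %s", path, pcmk_strerror(rc));
        return rc;
    }
    /* ... use the parsed CIB ... */
    free_xml(cib);
    return pcmk_ok;
}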
* \internal * \brief Check whether a file is the live CIB * * \param[in] filename Name of file to check * * \return TRUE if file exists and its real path is same as live CIB's */ static gboolean cib_file_is_live(const char *filename) { gboolean same = FALSE; if (filename != NULL) { // Canonicalize file names for true comparison char *real_filename = NULL; if (pcmk__real_path(filename, &real_filename) == pcmk_rc_ok) { char *real_livename = NULL; if (pcmk__real_path(CRM_CONFIG_DIR "/" CIB_LIVE_NAME, &real_livename) == pcmk_rc_ok) { same = !strcmp(real_filename, real_livename); free(real_livename); } free(real_filename); } } return same; } /* cib_file_backup() and cib_file_write_with_digest() need to chown the * written files only in limited circumstances, so these variables allow * that to be indicated without affecting external callers */ static uid_t cib_file_owner = 0; static uid_t cib_file_group = 0; static gboolean cib_do_chown = FALSE; /*! * \internal * \brief Back up a CIB * * \param[in] cib_dirname Directory containing CIB file and backups * \param[in] cib_filename Name (relative to cib_dirname) of CIB file to back up * * \return 0 on success, -1 on error */ static int cib_file_backup(const char *cib_dirname, const char *cib_filename) { int rc = 0; unsigned int seq; char *cib_path = crm_strdup_printf("%s/%s", cib_dirname, cib_filename); char *cib_digest = crm_strdup_printf("%s.sig", cib_path); char *backup_path; char *backup_digest; // Determine backup and digest file names if (pcmk__read_series_sequence(cib_dirname, CIB_SERIES, &seq) != pcmk_rc_ok) { // @TODO maybe handle errors better ... seq = 0; } backup_path = pcmk__series_filename(cib_dirname, CIB_SERIES, seq, CIB_SERIES_BZIP); backup_digest = crm_strdup_printf("%s.sig", backup_path); /* Remove the old backups if they exist */ unlink(backup_path); unlink(backup_digest); /* Back up the CIB, by hard-linking it to the backup name */ if ((link(cib_path, backup_path) < 0) && (errno != ENOENT)) { crm_perror(LOG_ERR, "Could not archive %s by linking to %s", cib_path, backup_path); rc = -1; /* Back up the CIB signature similarly */ } else if ((link(cib_digest, backup_digest) < 0) && (errno != ENOENT)) { crm_perror(LOG_ERR, "Could not archive %s by linking to %s", cib_digest, backup_digest); rc = -1; /* Update the last counter and ensure everything is sync'd to media */ } else { pcmk__write_series_sequence(cib_dirname, CIB_SERIES, ++seq, CIB_SERIES_MAX); if (cib_do_chown) { int rc2; if ((chown(backup_path, cib_file_owner, cib_file_group) < 0) && (errno != ENOENT)) { crm_perror(LOG_ERR, "Could not set owner of %s", backup_path); rc = -1; } if ((chown(backup_digest, cib_file_owner, cib_file_group) < 0) && (errno != ENOENT)) { crm_perror(LOG_ERR, "Could not set owner of %s", backup_digest); rc = -1; } rc2 = pcmk__chown_series_sequence(cib_dirname, CIB_SERIES, cib_file_owner, cib_file_group); if (rc2 != pcmk_rc_ok) { crm_err("Could not set owner of sequence file in %s: %s", cib_dirname, pcmk_rc_str(rc2)); rc = -1; } } pcmk__sync_directory(cib_dirname); crm_info("Archived previous version as %s", backup_path); } free(cib_path); free(cib_digest); free(backup_path); free(backup_digest); return rc; } /*! * \internal * \brief Prepare CIB XML to be written to disk * * Set num_updates to 0, set cib-last-written to the current timestamp, * and strip out the status section. 
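cib_file_backup() above archives by hard-linking rather than copying, and treats a missing source (ENOENT) as nothing to archive. A minimal standalone sketch of that idiom, with placeholder paths:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int
archive_by_link(const char *src, const char *dst)
{
    unlink(dst);                                  /* drop any stale archive */
    if ((link(src, dst) < 0) && (errno != ENOENT)) {
        perror("link");                           /* real failure */
        return -1;
    }
    return 0;                                     /* archived, or nothing to do */
}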
* * \param[in] root Root of CIB XML tree * * \return void */ static void cib_file_prepare_xml(xmlNode *root) { xmlNode *cib_status_root = NULL; /* Always write out with num_updates=0 and current last-written timestamp */ crm_xml_add(root, XML_ATTR_NUMUPDATES, "0"); crm_xml_add_last_written(root); /* Delete status section before writing to file, because * we discard it on startup anyway, and users get confused by it */ cib_status_root = find_xml_node(root, XML_CIB_TAG_STATUS, TRUE); CRM_LOG_ASSERT(cib_status_root != NULL); if (cib_status_root != NULL) { free_xml(cib_status_root); } } /*! * \internal * \brief Write CIB to disk, along with a signature file containing its digest * * \param[in] cib_root Root of XML tree to write * \param[in] cib_dirname Directory containing CIB and signature files * \param[in] cib_filename Name (relative to cib_dirname) of file to write * * \return pcmk_ok on success, * pcmk_err_cib_modified if existing cib_filename doesn't match digest, * pcmk_err_cib_backup if existing cib_filename couldn't be backed up, * or pcmk_err_cib_save if new cib_filename couldn't be saved */ int cib_file_write_with_digest(xmlNode *cib_root, const char *cib_dirname, const char *cib_filename) { int exit_rc = pcmk_ok; int rc, fd; char *digest = NULL; /* Detect CIB version for diagnostic purposes */ const char *epoch = crm_element_value(cib_root, XML_ATTR_GENERATION); const char *admin_epoch = crm_element_value(cib_root, XML_ATTR_GENERATION_ADMIN); /* Determine full CIB and signature pathnames */ char *cib_path = crm_strdup_printf("%s/%s", cib_dirname, cib_filename); char *digest_path = crm_strdup_printf("%s.sig", cib_path); /* Create temporary file name patterns for writing out CIB and signature */ char *tmp_cib = crm_strdup_printf("%s/cib.XXXXXX", cib_dirname); char *tmp_digest = crm_strdup_printf("%s/cib.XXXXXX", cib_dirname); CRM_ASSERT((cib_path != NULL) && (digest_path != NULL) && (tmp_cib != NULL) && (tmp_digest != NULL)); /* Ensure the admin didn't modify the existing CIB underneath us */ crm_trace("Reading cluster configuration file %s", cib_path); rc = cib_file_read_and_verify(cib_path, NULL, NULL); if ((rc != pcmk_ok) && (rc != -ENOENT)) { crm_err("%s was manually modified while the cluster was active!", cib_path); exit_rc = pcmk_err_cib_modified; goto cleanup; } /* Back up the existing CIB */ if (cib_file_backup(cib_dirname, cib_filename) < 0) { exit_rc = pcmk_err_cib_backup; goto cleanup; } crm_debug("Writing CIB to disk"); umask(S_IWGRP | S_IWOTH | S_IROTH); cib_file_prepare_xml(cib_root); /* Write the CIB to a temporary file, so we can deploy (near) atomically */ fd = mkstemp(tmp_cib); if (fd < 0) { crm_perror(LOG_ERR, "Couldn't open temporary file %s for writing CIB", tmp_cib); exit_rc = pcmk_err_cib_save; goto cleanup; } /* Protect the temporary file */ if (fchmod(fd, S_IRUSR | S_IWUSR) < 0) { crm_perror(LOG_ERR, "Couldn't protect temporary file %s for writing CIB", tmp_cib); exit_rc = pcmk_err_cib_save; goto cleanup; } if (cib_do_chown && (fchown(fd, cib_file_owner, cib_file_group) < 0)) { crm_perror(LOG_ERR, "Couldn't protect temporary file %s for writing CIB", tmp_cib); exit_rc = pcmk_err_cib_save; goto cleanup; } /* Write out the CIB */ if (write_xml_fd(cib_root, tmp_cib, fd, FALSE) <= 0) { crm_err("Changes couldn't be written to %s", tmp_cib); exit_rc = pcmk_err_cib_save; goto cleanup; } /* Calculate CIB digest */ digest = calculate_on_disk_digest(cib_root); CRM_ASSERT(digest != NULL); crm_info("Wrote version %s.%s.0 of the CIB to disk (digest: %s)", (admin_epoch ? 
admin_epoch : "0"), (epoch ? epoch : "0"), digest); /* Write the CIB digest to a temporary file */ fd = mkstemp(tmp_digest); if (fd < 0) { crm_perror(LOG_ERR, "Could not create temporary file for CIB digest"); exit_rc = pcmk_err_cib_save; goto cleanup; } if (cib_do_chown && (fchown(fd, cib_file_owner, cib_file_group) < 0)) { crm_perror(LOG_ERR, "Couldn't protect temporary file %s for writing CIB", tmp_cib); exit_rc = pcmk_err_cib_save; close(fd); goto cleanup; } rc = pcmk__write_sync(fd, digest); if (rc != pcmk_rc_ok) { crm_err("Could not write digest to %s: %s", tmp_digest, pcmk_rc_str(rc)); exit_rc = pcmk_err_cib_save; close(fd); goto cleanup; } close(fd); crm_debug("Wrote digest %s to disk", digest); /* Verify that what we wrote is sane */ crm_info("Reading cluster configuration file %s (digest: %s)", tmp_cib, tmp_digest); rc = cib_file_read_and_verify(tmp_cib, tmp_digest, NULL); CRM_ASSERT(rc == 0); /* Rename temporary files to live, and sync directory changes to media */ crm_debug("Activating %s", tmp_cib); if (rename(tmp_cib, cib_path) < 0) { crm_perror(LOG_ERR, "Couldn't rename %s as %s", tmp_cib, cib_path); exit_rc = pcmk_err_cib_save; } if (rename(tmp_digest, digest_path) < 0) { crm_perror(LOG_ERR, "Couldn't rename %s as %s", tmp_digest, digest_path); exit_rc = pcmk_err_cib_save; } pcmk__sync_directory(cib_dirname); cleanup: free(cib_path); free(digest_path); free(digest); free(tmp_digest); free(tmp_cib); return exit_rc; } cib_t * cib_file_new(const char *cib_location) { cib_file_opaque_t *private = NULL; cib_t *cib = cib_new_variant(); private = calloc(1, sizeof(cib_file_opaque_t)); CRM_ASSERT((cib != NULL) && (private != NULL)); cib->variant = cib_file; cib->variant_opaque = private; if (cib_location == NULL) { cib_location = getenv("CIB_file"); } private->flags = 0; if (cib_file_is_live(cib_location)) { set_bit(private->flags, CIB_FLAG_LIVE); crm_trace("File %s detected as live CIB", cib_location); } private->filename = strdup(cib_location); /* assign variant specific ops */ cib->delegate_fn = cib_file_perform_op_delegate; cib->cmds->signon = cib_file_signon; cib->cmds->signoff = cib_file_signoff; cib->cmds->free = cib_file_free; cib->cmds->inputfd = cib_file_inputfd; cib->cmds->register_notification = cib_file_register_notification; cib->cmds->set_connection_dnotify = cib_file_set_connection_dnotify; return cib; } static xmlNode *in_mem_cib = NULL; /*! * \internal * \brief Read CIB from disk and validate it against XML schema * * \param[in] filename Name of file to read CIB from * * \return pcmk_ok on success, * -ENXIO if file does not exist (or stat() otherwise fails), or * -pcmk_err_schema_validation if XML doesn't parse or validate * \note If filename is the live CIB, this will *not* verify its digest, * though that functionality would be trivial to add here. * Also, this will *not* verify that the file is writable, * because some callers might not need to write. 
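cib_file_write_with_digest() above uses the classic write-to-temporary-file-then-rename pattern (plus pcmk__sync_directory()) so readers only ever see a complete old or new file. A generic sketch of that pattern, independent of the CIB specifics:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int
atomic_write(const char *dir, const char *path, const char *data)
{
    char tmp[256];
    int fd;

    snprintf(tmp, sizeof(tmp), "%s/tmp.XXXXXX", dir);
    fd = mkstemp(tmp);                            /* temp file on the same filesystem */
    if (fd < 0) {
        return -1;
    }
    if ((fchmod(fd, S_IRUSR | S_IWUSR) < 0)       /* restrict before writing */
        || (write(fd, data, strlen(data)) < 0)
        || (fsync(fd) < 0)) {
        close(fd);
        unlink(tmp);
        return -1;
    }
    close(fd);
    if (rename(tmp, path) < 0) {                  /* atomic replacement */
        unlink(tmp);
        return -1;
    }
    return 0;
}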
*/ static int load_file_cib(const char *filename) { struct stat buf; xmlNode *root = NULL; /* Ensure file is readable */ if (stat(filename, &buf) < 0) { return -ENXIO; } /* Parse XML from file */ root = filename2xml(filename); if (root == NULL) { return -pcmk_err_schema_validation; } /* Add a status section if not already present */ if (find_xml_node(root, XML_CIB_TAG_STATUS, FALSE) == NULL) { create_xml_node(root, XML_CIB_TAG_STATUS); } /* Validate XML against its specified schema */ if (validate_xml(root, NULL, TRUE) == FALSE) { const char *schema = crm_element_value(root, XML_ATTR_VALIDATION); crm_err("CIB does not validate against %s", schema); free_xml(root); return -pcmk_err_schema_validation; } /* Remember the parsed XML for later use */ in_mem_cib = root; return pcmk_ok; } int cib_file_signon(cib_t * cib, const char *name, enum cib_conn_type type) { int rc = pcmk_ok; cib_file_opaque_t *private = cib->variant_opaque; if (private->filename == NULL) { rc = -EINVAL; } else { rc = load_file_cib(private->filename); } if (rc == pcmk_ok) { - crm_debug("%s: Opened connection to local file '%s'", name, private->filename); + crm_debug("Opened connection to local file '%s' for %s", + private->filename, name); cib->state = cib_connected_command; cib->type = cib_command; } else { - fprintf(stderr, "%s: Connection to local file '%s' failed: %s\n", - name, private->filename, pcmk_strerror(rc)); + crm_info("Connection to local file '%s' for %s failed: %s\n", + private->filename, name, pcmk_strerror(rc)); } - return rc; } /*! * \internal * \brief Write out the in-memory CIB to a live CIB file * * param[in] path Full path to file to write * * \return 0 on success, -1 on failure */ static int cib_file_write_live(char *path) { uid_t uid = geteuid(); struct passwd *daemon_pwent; char *sep = strrchr(path, '/'); const char *cib_dirname, *cib_filename; int rc = 0; /* Get the desired uid/gid */ errno = 0; daemon_pwent = getpwnam(CRM_DAEMON_USER); if (daemon_pwent == NULL) { crm_perror(LOG_ERR, "Could not find %s user", CRM_DAEMON_USER); return -1; } /* If we're root, we can change the ownership; * if we're daemon, anything we create will be OK; * otherwise, block access so we don't create wrong owner */ if ((uid != 0) && (uid != daemon_pwent->pw_uid)) { crm_perror(LOG_ERR, "Must be root or %s to modify live CIB", CRM_DAEMON_USER); return 0; } /* fancy footwork to separate dirname from filename * (we know the canonical name maps to the live CIB, * but the given name might be relative, or symlinked) */ if (sep == NULL) { /* no directory component specified */ cib_dirname = "./"; cib_filename = path; } else if (sep == path) { /* given name is in / */ cib_dirname = "/"; cib_filename = path + 1; } else { /* typical case; split given name into parts */ *sep = '\0'; cib_dirname = path; cib_filename = sep + 1; } /* if we're root, we want to update the file ownership */ if (uid == 0) { cib_file_owner = daemon_pwent->pw_uid; cib_file_group = daemon_pwent->pw_gid; cib_do_chown = TRUE; } /* write the file */ if (cib_file_write_with_digest(in_mem_cib, cib_dirname, cib_filename) != pcmk_ok) { rc = -1; } /* turn off file ownership changes, for other callers */ if (uid == 0) { cib_do_chown = FALSE; } /* undo fancy stuff */ if ((sep != NULL) && (*sep == '\0')) { *sep = '/'; } return rc; } /*! * \internal * \brief Sign-off method for CIB file variants * * This will write the file to disk if needed, and free the in-memory CIB. If * the file is the live CIB, it will compute and write a signature as well. 
* * \param[in] cib CIB object to sign off * * \return pcmk_ok on success, pcmk_err_generic on failure * \todo This method should refuse to write the live CIB if the CIB manager is * running. */ int cib_file_signoff(cib_t * cib) { int rc = pcmk_ok; cib_file_opaque_t *private = cib->variant_opaque; crm_debug("Disconnecting from the CIB manager"); cib->state = cib_disconnected; cib->type = cib_no_connection; /* If the in-memory CIB has been changed, write it to disk */ if (is_set(private->flags, CIB_FLAG_DIRTY)) { /* If this is the live CIB, write it out with a digest */ if (is_set(private->flags, CIB_FLAG_LIVE)) { if (cib_file_write_live(private->filename) < 0) { rc = pcmk_err_generic; } /* Otherwise, it's a simple write */ } else { gboolean do_bzip = pcmk__ends_with_ext(private->filename, ".bz2"); if (write_xml_file(in_mem_cib, private->filename, do_bzip) <= 0) { rc = pcmk_err_generic; } } if (rc == pcmk_ok) { crm_info("Wrote CIB to %s", private->filename); clear_bit(private->flags, CIB_FLAG_DIRTY); } else { crm_err("Could not write CIB to %s", private->filename); } } /* Free the in-memory CIB */ free_xml(in_mem_cib); in_mem_cib = NULL; return rc; } int cib_file_free(cib_t * cib) { int rc = pcmk_ok; if (cib->state != cib_disconnected) { rc = cib_file_signoff(cib); } if (rc == pcmk_ok) { cib_file_opaque_t *private = cib->variant_opaque; free(private->filename); free(cib->cmds); free(private); free(cib); } else { fprintf(stderr, "Couldn't sign off: %d\n", rc); } return rc; } struct cib_func_entry { const char *op; gboolean read_only; cib_op_t fn; }; /* *INDENT-OFF* */ static struct cib_func_entry cib_file_ops[] = { {CIB_OP_QUERY, TRUE, cib_process_query}, {CIB_OP_MODIFY, FALSE, cib_process_modify}, {CIB_OP_APPLY_DIFF, FALSE, cib_process_diff}, {CIB_OP_BUMP, FALSE, cib_process_bump}, {CIB_OP_REPLACE, FALSE, cib_process_replace}, {CIB_OP_CREATE, FALSE, cib_process_create}, {CIB_OP_DELETE, FALSE, cib_process_delete}, {CIB_OP_ERASE, FALSE, cib_process_erase}, {CIB_OP_UPGRADE, FALSE, cib_process_upgrade}, }; /* *INDENT-ON* */ int cib_file_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options) { return cib_file_perform_op_delegate(cib, op, host, section, data, output_data, call_options, NULL); } int cib_file_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name) { int rc = pcmk_ok; char *effective_user = NULL; gboolean query = FALSE; gboolean changed = FALSE; xmlNode *request = NULL; xmlNode *output = NULL; xmlNode *cib_diff = NULL; xmlNode *result_cib = NULL; cib_op_t *fn = NULL; int lpc = 0; static int max_msg_types = DIMOF(cib_file_ops); cib_file_opaque_t *private = cib->variant_opaque; #if ENABLE_ACL crm_info("Handling %s operation for %s as %s", (op? op : "invalid"), (section? section : "entire CIB"), (user_name? user_name : "default user")); #else crm_info("Handling %s operation for %s", (op? op : "invalid"), (section? 
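The cib_file_ops[] table above maps operation names to handlers and is scanned linearly by cib_file_perform_op_delegate(). A minimal sketch of the same table-driven dispatch with hypothetical operations:

#include <string.h>

typedef int (*demo_op_fn_t)(const char *arg);

static int do_query(const char *arg)  { (void) arg; return 0; }
static int do_modify(const char *arg) { (void) arg; return 0; }

static const struct { const char *op; demo_op_fn_t fn; } demo_ops[] = {
    { "query",  do_query  },
    { "modify", do_modify },
};

static int
demo_dispatch(const char *op, const char *arg)
{
    for (size_t i = 0; i < sizeof(demo_ops) / sizeof(demo_ops[0]); i++) {
        if (strcmp(op, demo_ops[i].op) == 0) {
            return demo_ops[i].fn(arg);
        }
    }
    return -1;    /* unknown operation, analogous to -EPROTONOSUPPORT above */
}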
section : "entire CIB")); #endif call_options |= (cib_no_mtime | cib_inhibit_bcast | cib_scope_local); if (cib->state == cib_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { return -EINVAL; } for (lpc = 0; lpc < max_msg_types; lpc++) { if (safe_str_eq(op, cib_file_ops[lpc].op)) { fn = &(cib_file_ops[lpc].fn); query = cib_file_ops[lpc].read_only; break; } } if (fn == NULL) { return -EPROTONOSUPPORT; } cib->call_id++; request = cib_create_op(cib->call_id, "dummy-token", op, host, section, data, call_options, user_name); #if ENABLE_ACL if(user_name) { crm_xml_add(request, XML_ACL_TAG_USER, user_name); } #endif /* Mirror the logic in cib_prepare_common() */ if (section != NULL && data != NULL && crm_str_eq(crm_element_name(data), XML_TAG_CIB, TRUE)) { data = get_object_root(section, data); } rc = cib_perform_op(op, call_options, fn, query, section, request, data, TRUE, &changed, in_mem_cib, &result_cib, &cib_diff, &output); free_xml(request); if (rc == -pcmk_err_schema_validation) { validate_xml_verbose(result_cib); } if (rc != pcmk_ok) { free_xml(result_cib); } else if (query == FALSE) { xml_log_patchset(LOG_DEBUG, "cib:diff", cib_diff); free_xml(in_mem_cib); in_mem_cib = result_cib; set_bit(private->flags, CIB_FLAG_DIRTY); } free_xml(cib_diff); if (cib->op_callback != NULL) { cib->op_callback(NULL, cib->call_id, rc, output); } if (output_data && output) { if(output == in_mem_cib) { *output_data = copy_xml(output); } else { *output_data = output; } } else if(output != in_mem_cib) { free_xml(output); } free(effective_user); return rc; } diff --git a/lib/cib/cib_native.c b/lib/cib/cib_native.c index 7d562a440d..bce925e6ed 100644 --- a/lib/cib/cib_native.c +++ b/lib/cib/cib_native.c @@ -1,512 +1,513 @@ /* * Copyright 2004 International Business Machines * Later changes copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include typedef struct cib_native_opaque_s { char *token; crm_ipc_t *ipc; void (*dnotify_fn) (gpointer user_data); mainloop_io_t *source; } cib_native_opaque_t; int cib_native_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options); int cib_native_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name); int cib_native_free(cib_t * cib); int cib_native_signoff(cib_t * cib); int cib_native_signon(cib_t * cib, const char *name, enum cib_conn_type type); int cib_native_signon_raw(cib_t * cib, const char *name, enum cib_conn_type type, int *event_fd); bool cib_native_dispatch(cib_t * cib); int cib_native_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data)); cib_t * cib_native_new(void) { cib_native_opaque_t *native = NULL; cib_t *cib = cib_new_variant(); native = calloc(1, sizeof(cib_native_opaque_t)); cib->variant = cib_native; cib->variant_opaque = native; native->ipc = NULL; native->source = NULL; native->dnotify_fn = NULL; /* assign variant specific ops */ cib->delegate_fn = cib_native_perform_op_delegate; cib->cmds->signon = cib_native_signon; cib->cmds->signon_raw = cib_native_signon_raw; cib->cmds->signoff = cib_native_signoff; cib->cmds->free = cib_native_free; cib->cmds->register_notification = cib_native_register_notification; cib->cmds->set_connection_dnotify = cib_native_set_connection_dnotify; return cib; } int cib_native_signon(cib_t * cib, const char *name, enum cib_conn_type type) { return cib_native_signon_raw(cib, name, type, NULL); } static int cib_native_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; xmlNode *msg = NULL; cib_t *cib = userdata; crm_trace("dispatching %p", userdata); if (cib == NULL) { crm_err("No CIB!"); return 0; } msg = string2xml(buffer); if (msg == NULL) { crm_warn("Received a NULL message from the CIB manager"); return 0; } /* do callbacks */ type = crm_element_value(msg, F_TYPE); crm_trace("Activating %s callbacks...", type); crm_log_xml_explicit(msg, "cib-reply"); if (safe_str_eq(type, T_CIB)) { cib_native_callback(cib, msg, 0, 0); } else if (safe_str_eq(type, T_CIB_NOTIFY)) { g_list_foreach(cib->notify_list, cib_native_notify, msg); } else { crm_err("Unknown message type: %s", type); } free_xml(msg); return 0; } bool cib_native_dispatch(cib_t * cib) { gboolean stay_connected = TRUE; cib_native_opaque_t *native; if (cib == NULL) { crm_err("No CIB!"); return FALSE; } crm_trace("dispatching %p", cib); native = cib->variant_opaque; while (crm_ipc_ready(native->ipc)) { if (crm_ipc_read(native->ipc) > 0) { const char *msg = crm_ipc_buffer(native->ipc); cib_native_dispatch_internal(msg, strlen(msg), cib); } if (crm_ipc_connected(native->ipc) == FALSE) { crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } static void cib_native_destroy(void *userdata) { cib_t *cib = userdata; cib_native_opaque_t *native = cib->variant_opaque; crm_trace("destroying %p", userdata); cib->state = cib_disconnected; native->source = NULL; native->ipc = NULL; if (native->dnotify_fn) { native->dnotify_fn(userdata); } } int cib_native_signon_raw(cib_t * cib, const char *name, enum cib_conn_type type, int *async_fd) { int rc = pcmk_ok; 
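cib_native_dispatch() above drains its IPC connection with the crm_ipc_ready()/crm_ipc_read()/crm_ipc_buffer() trio and stops reporting "stay connected" once the peer goes away. A condensed sketch of that loop for a hypothetical client, assuming the crm_ipc_* API from crm/common/ipc.h and an already-connected handle:

static gboolean
drain_ipc(crm_ipc_t *ipc)
{
    gboolean stay_connected = TRUE;

    while (crm_ipc_ready(ipc)) {                      /* more data pending? */
        if (crm_ipc_read(ipc) > 0) {
            const char *buffer = crm_ipc_buffer(ipc); /* borrowed, not owned */
            xmlNode *msg = string2xml(buffer);

            if (msg != NULL) {
                /* ... handle the message ... */
                free_xml(msg);
            }
        }
        if (crm_ipc_connected(ipc) == FALSE) {        /* peer vanished mid-drain */
            stay_connected = FALSE;
        }
    }
    return stay_connected;
}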
const char *channel = NULL; cib_native_opaque_t *native = cib->variant_opaque; struct ipc_client_callbacks cib_callbacks = { .dispatch = cib_native_dispatch_internal, .destroy = cib_native_destroy }; cib->call_timeout = MAX_IPC_DELAY; if (type == cib_command) { cib->state = cib_connected_command; channel = PCMK__SERVER_BASED_RW; } else if (type == cib_command_nonblocking) { cib->state = cib_connected_command; channel = PCMK__SERVER_BASED_SHM; } else if (type == cib_query) { cib->state = cib_connected_query; channel = PCMK__SERVER_BASED_RO; } else { return -ENOTCONN; } crm_trace("Connecting %s channel", channel); if (async_fd != NULL) { native->ipc = crm_ipc_new(channel, 0); if (native->ipc && crm_ipc_connect(native->ipc)) { *async_fd = crm_ipc_get_fd(native->ipc); } else if (native->ipc) { - crm_perror(LOG_ERR, "Connection to cluster information base failed"); rc = -ENOTCONN; } } else { native->source = mainloop_add_ipc_client(channel, G_PRIORITY_HIGH, 512 * 1024 /* 512k */ , cib, &cib_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (rc != pcmk_ok || native->ipc == NULL || crm_ipc_connected(native->ipc) == FALSE) { - crm_debug("Connection unsuccessful (%d %p)", rc, native->ipc); + crm_info("Could not connect to CIB manager for %s", name); rc = -ENOTCONN; } if (rc == pcmk_ok) { xmlNode *reply = NULL; xmlNode *hello = create_xml_node(NULL, "cib_command"); crm_xml_add(hello, F_TYPE, T_CIB); crm_xml_add(hello, F_CIB_OPERATION, CRM_OP_REGISTER); crm_xml_add(hello, F_CIB_CLIENTNAME, name); crm_xml_add_int(hello, F_CIB_CALLOPTS, cib_sync_call); if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply) > 0) { const char *msg_type = crm_element_value(reply, F_CIB_OPERATION); rc = pcmk_ok; crm_log_xml_trace(reply, "reg-reply"); if (safe_str_neq(msg_type, CRM_OP_REGISTER)) { - crm_err("Invalid registration message: %s", msg_type); + crm_info("Reply to CIB registration message has " + "unknown type '%s'", msg_type); rc = -EPROTO; } else { native->token = crm_element_value_copy(reply, F_CIB_CLIENTID); if (native->token == NULL) { rc = -EPROTO; } } free_xml(reply); } else { rc = -ECOMM; } free_xml(hello); } if (rc == pcmk_ok) { - crm_debug("Connection to CIB manager successful"); + crm_info("Successfully connected to CIB manager for %s", name); return pcmk_ok; } - crm_debug("Connection to CIB manager failed: %s", pcmk_strerror(rc)); + crm_info("Connection to CIB manager for %s failed: %s", + name, pcmk_strerror(rc)); cib_native_signoff(cib); return rc; } int cib_native_signoff(cib_t * cib) { cib_native_opaque_t *native = cib->variant_opaque; crm_debug("Disconnecting from the CIB manager"); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } cib->state = cib_disconnected; cib->type = cib_no_connection; return pcmk_ok; } int cib_native_free(cib_t * cib) { int rc = pcmk_ok; if (cib->state != cib_disconnected) { rc = cib_native_signoff(cib); } if (cib->state == cib_disconnected) { cib_native_opaque_t *native = cib->variant_opaque; free(native->token); free(cib->variant_opaque); free(cib->cmds); free(cib); } return rc; } int cib_native_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options) { return cib_native_perform_op_delegate(cib, op, host, section, 
data, output_data, call_options, NULL); } int cib_native_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name) { int rc = pcmk_ok; int reply_id = 0; enum crm_ipc_flags ipc_flags = crm_ipc_flags_none; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; cib_native_opaque_t *native = cib->variant_opaque; if (cib->state == cib_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } if (call_options & cib_sync_call) { ipc_flags |= crm_ipc_client_response; } cib->call_id++; /* prevent call_id from being negative (or zero) and conflicting * with the cib_errors enum * use 2 because we use it as (cib->call_id - 1) below */ if (cib->call_id < 1) { cib->call_id = 1; } CRM_CHECK(native->token != NULL,; ); op_msg = cib_create_op(cib->call_id, native->token, op, host, section, data, call_options, user_name); if (op_msg == NULL) { return -EPROTO; } crm_trace("Sending %s message to the CIB manager (timeout=%ds)", op, cib->call_timeout); rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, cib->call_timeout * 1000, &op_reply); free_xml(op_msg); if (rc < 0) { crm_err("Couldn't perform %s operation (timeout=%ds): %s (%d)", op, cib->call_timeout, pcmk_strerror(rc), rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & cib_sync_call)) { crm_trace("Async call, returning %d", cib->call_id); CRM_CHECK(cib->call_id != 0, return -ENOMSG); free_xml(op_reply); return cib->call_id; } rc = pcmk_ok; crm_element_value_int(op_reply, F_CIB_CALLID, &reply_id); if (reply_id == cib->call_id) { xmlNode *tmp = get_message_xml(op_reply, F_CIB_CALLDATA); crm_trace("Synchronous reply %d received", reply_id); if (crm_element_value_int(op_reply, F_CIB_RC, &rc) != 0) { rc = -EPROTO; } if (output_data == NULL || (call_options & cib_discard_reply)) { crm_trace("Discarding reply"); } else if (tmp != NULL) { *output_data = copy_xml(tmp); } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); rc = -ENOMSG; goto done; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, cib->call_id); crm_log_xml_err(op_reply, "Old reply"); rc = -ENOMSG; goto done; } if (op_reply == NULL && cib->state == cib_disconnected) { rc = -ENOTCONN; } else if (rc == pcmk_ok && op_reply == NULL) { rc = -ETIME; } switch (rc) { case pcmk_ok: case -EPERM: break; /* This is an internal value that clients do not and should not care about */ case -pcmk_err_diff_resync: rc = pcmk_ok; break; /* These indicate internal problems */ case -EPROTO: case -ENOMSG: crm_err("Call failed: %s", pcmk_strerror(rc)); if (op_reply) { crm_log_xml_err(op_reply, "Invalid reply"); } break; default: if (safe_str_neq(op, CIB_OP_QUERY)) { crm_warn("Call failed: %s", pcmk_strerror(rc)); } } done: if (crm_ipc_connected(native->ipc) == FALSE) { crm_err("The CIB manager disconnected"); cib->state = cib_disconnected; } free_xml(op_reply); return rc; } int cib_native_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data)) { cib_native_opaque_t *native = NULL; if (cib == NULL) { crm_err("No CIB!"); return FALSE; } native = cib->variant_opaque; native->dnotify_fn = dnotify; return pcmk_ok; } int cib_native_register_notification(cib_t * cib, const char *callback, int enabled) { int rc = pcmk_ok; xmlNode *notify_msg = create_xml_node(NULL, "cib-callback"); cib_native_opaque_t 
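cib_native_perform_op_delegate() above correlates requests and replies with an incrementing call_id, clamped back to 1 on wraparound so it never collides with the negative error space, and only accepts the reply carrying the same id. A stripped-down sketch of that correlation logic (the transport itself is left out):

#include <errno.h>

static int next_call_id = 0;

static int
new_call_id(void)
{
    next_call_id++;
    if (next_call_id < 1) {      /* wrapped: keep ids positive */
        next_call_id = 1;
    }
    return next_call_id;
}

static int
check_reply(int sent_id, int reply_id)
{
    if (reply_id == sent_id) {
        return 0;                /* this is our synchronous answer */
    } else if (reply_id <= 0) {
        return -ENOMSG;          /* malformed reply: no id set */
    }
    return -ENOMSG;              /* stale reply from an earlier call */
}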
*native = cib->variant_opaque; if (cib->state != cib_disconnected) { crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY); crm_xml_add(notify_msg, F_CIB_NOTIFY_TYPE, callback); crm_xml_add_int(notify_msg, F_CIB_NOTIFY_ACTIVATE, enabled); rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, 1000 * cib->call_timeout, NULL); if (rc <= 0) { crm_trace("Notification not registered: %d", rc); rc = -ECOMM; } } free_xml(notify_msg); return rc; } diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c index 9411f90e23..ed93700243 100644 --- a/lib/cib/cib_remote.c +++ b/lib/cib/cib_remote.c @@ -1,636 +1,636 @@ /* * Copyright 2008-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include gnutls_anon_client_credentials_t anon_cred_c; # define DEFAULT_CLIENT_HANDSHAKE_TIMEOUT 5000 /* 5 seconds */ const int kx_prio[] = { GNUTLS_KX_ANON_DH, 0 }; static gboolean remote_gnutls_credentials_init = FALSE; #else typedef void gnutls_session_t; #endif #include #ifndef ON_BSD # include #endif #define DH_BITS 1024 typedef struct cib_remote_opaque_s { int flags; int socket; int port; char *server; char *user; char *passwd; gboolean encrypted; pcmk__remote_t command; pcmk__remote_t callback; } cib_remote_opaque_t; void cib_remote_connection_destroy(gpointer user_data); int cib_remote_callback_dispatch(gpointer user_data); int cib_remote_command_dispatch(gpointer user_data); int cib_remote_signon(cib_t * cib, const char *name, enum cib_conn_type type); int cib_remote_signoff(cib_t * cib); int cib_remote_free(cib_t * cib); int cib_remote_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *name); static int cib_remote_inputfd(cib_t * cib) { cib_remote_opaque_t *private = cib->variant_opaque; return private->callback.tcp_socket; } static int cib_remote_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data)) { return -EPROTONOSUPPORT; } static int cib_remote_register_notification(cib_t * cib, const char *callback, int enabled) { xmlNode *notify_msg = create_xml_node(NULL, "cib_command"); cib_remote_opaque_t *private = cib->variant_opaque; crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY); crm_xml_add(notify_msg, F_CIB_NOTIFY_TYPE, callback); crm_xml_add_int(notify_msg, F_CIB_NOTIFY_ACTIVATE, enabled); pcmk__remote_send_xml(&private->callback, notify_msg); free_xml(notify_msg); return pcmk_ok; } cib_t * cib_remote_new(const char *server, const char *user, const char *passwd, int port, gboolean encrypted) { cib_remote_opaque_t *private = NULL; cib_t *cib = cib_new_variant(); private = calloc(1, sizeof(cib_remote_opaque_t)); cib->variant = cib_remote; cib->variant_opaque = private; if (server) { private->server = strdup(server); } if (user) { private->user = strdup(user); } if (passwd) { private->passwd = strdup(passwd); } private->port = port; private->encrypted = encrypted; /* assign variant specific ops */ cib->delegate_fn = cib_remote_perform_op; cib->cmds->signon = cib_remote_signon; cib->cmds->signoff = cib_remote_signoff; cib->cmds->free = cib_remote_free; cib->cmds->inputfd = cib_remote_inputfd; 
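A hedged sketch of client code driving this remote variant through the cmds table populated here; the host, port, credentials, and client name are placeholders, and the declarations are assumed to be visible through the libcib headers:

#include <errno.h>

static int
remote_cib_demo(void)
{
    /* TRUE selects the encrypted (TLS) transport; 3121 is only a placeholder port */
    cib_t *cib = cib_remote_new("cib-host.example.com", "hacluster", NULL, 3121, TRUE);
    int rc;

    if (cib == NULL) {
        return -ENOMEM;
    }
    rc = cib->cmds->signon(cib, "remote-demo", cib_command);
    if (rc == pcmk_ok) {
        /* ... use the other cib->cmds operations as with any cib_t ... */
    }
    cib->cmds->free(cib);    /* signs off first if still connected */
    return rc;
}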
cib->cmds->register_notification = cib_remote_register_notification; cib->cmds->set_connection_dnotify = cib_remote_set_connection_dnotify; return cib; } static int cib_tls_close(cib_t * cib) { cib_remote_opaque_t *private = cib->variant_opaque; #ifdef HAVE_GNUTLS_GNUTLS_H if (private->encrypted) { if (private->command.tls_session) { gnutls_bye(*(private->command.tls_session), GNUTLS_SHUT_RDWR); gnutls_deinit(*(private->command.tls_session)); gnutls_free(private->command.tls_session); } if (private->callback.tls_session) { gnutls_bye(*(private->callback.tls_session), GNUTLS_SHUT_RDWR); gnutls_deinit(*(private->callback.tls_session)); gnutls_free(private->callback.tls_session); } private->command.tls_session = NULL; private->callback.tls_session = NULL; if (remote_gnutls_credentials_init) { gnutls_anon_free_client_credentials(anon_cred_c); gnutls_global_deinit(); remote_gnutls_credentials_init = FALSE; } } #endif if (private->command.tcp_socket) { shutdown(private->command.tcp_socket, SHUT_RDWR); /* no more receptions */ close(private->command.tcp_socket); } if (private->callback.tcp_socket) { shutdown(private->callback.tcp_socket, SHUT_RDWR); /* no more receptions */ close(private->callback.tcp_socket); } private->command.tcp_socket = 0; private->callback.tcp_socket = 0; free(private->command.buffer); free(private->callback.buffer); private->command.buffer = NULL; private->callback.buffer = NULL; return 0; } static inline int cib__tls_client_handshake(pcmk__remote_t *remote) { return pcmk__tls_client_handshake(remote, DEFAULT_CLIENT_HANDSHAKE_TIMEOUT); } static int cib_tls_signon(cib_t *cib, pcmk__remote_t *connection, gboolean event_channel) { cib_remote_opaque_t *private = cib->variant_opaque; int rc; xmlNode *answer = NULL; xmlNode *login = NULL; static struct mainloop_fd_callbacks cib_fd_callbacks = { 0, }; cib_fd_callbacks.dispatch = event_channel ? 
cib_remote_callback_dispatch : cib_remote_command_dispatch; cib_fd_callbacks.destroy = cib_remote_connection_destroy; connection->tcp_socket = -1; #ifdef HAVE_GNUTLS_GNUTLS_H connection->tls_session = NULL; #endif rc = pcmk__connect_remote(private->server, private->port, 0, NULL, &(connection->tcp_socket), NULL, NULL); if (rc != pcmk_rc_ok) { - crm_err("Remote connection to %s:%d failed: %s " CRM_XS " rc=%d", - private->server, private->port, pcmk_rc_str(rc), rc); + crm_info("Remote connection to %s:%d failed: %s " CRM_XS " rc=%d", + private->server, private->port, pcmk_rc_str(rc), rc); return -ENOTCONN; } if (private->encrypted) { /* initialize GnuTls lib */ #ifdef HAVE_GNUTLS_GNUTLS_H if (remote_gnutls_credentials_init == FALSE) { crm_gnutls_global_init(); gnutls_anon_allocate_client_credentials(&anon_cred_c); remote_gnutls_credentials_init = TRUE; } /* bind the socket to GnuTls lib */ connection->tls_session = pcmk__new_tls_session(connection->tcp_socket, GNUTLS_CLIENT, GNUTLS_CRD_ANON, anon_cred_c); if (connection->tls_session == NULL) { cib_tls_close(cib); return -1; } if (cib__tls_client_handshake(connection) != pcmk_rc_ok) { crm_err("Session creation for %s:%d failed", private->server, private->port); gnutls_deinit(*connection->tls_session); gnutls_free(connection->tls_session); connection->tls_session = NULL; cib_tls_close(cib); return -1; } #else return -EPROTONOSUPPORT; #endif } /* login to server */ login = create_xml_node(NULL, "cib_command"); crm_xml_add(login, "op", "authenticate"); crm_xml_add(login, "user", private->user); crm_xml_add(login, "password", private->passwd); crm_xml_add(login, "hidden", "password"); pcmk__remote_send_xml(connection, login); free_xml(login); rc = pcmk_ok; if (pcmk__read_remote_message(connection, -1) == ENOTCONN) { rc = -ENOTCONN; } answer = pcmk__remote_message_xml(connection); crm_log_xml_trace(answer, "Reply"); if (answer == NULL) { rc = -EPROTO; } else { /* grab the token */ const char *msg_type = crm_element_value(answer, F_CIB_OPERATION); const char *tmp_ticket = crm_element_value(answer, F_CIB_CLIENTID); if (safe_str_neq(msg_type, CRM_OP_REGISTER)) { crm_err("Invalid registration message: %s", msg_type); rc = -EPROTO; } else if (tmp_ticket == NULL) { rc = -EPROTO; } else { connection->token = strdup(tmp_ticket); } } free_xml(answer); answer = NULL; if (rc != 0) { cib_tls_close(cib); return rc; } crm_trace("remote client connection established"); connection->source = mainloop_add_fd("cib-remote", G_PRIORITY_HIGH, connection->tcp_socket, cib, &cib_fd_callbacks); return rc; } void cib_remote_connection_destroy(gpointer user_data) { crm_err("Connection destroyed"); #ifdef HAVE_GNUTLS_GNUTLS_H cib_tls_close(user_data); #endif return; } int cib_remote_command_dispatch(gpointer user_data) { int rc; cib_t *cib = user_data; cib_remote_opaque_t *private = cib->variant_opaque; rc = pcmk__read_remote_message(&private->command, -1); free(private->command.buffer); private->command.buffer = NULL; crm_err("received late reply for remote cib connection, discarding"); if (rc == ENOTCONN) { return -1; } return 0; } int cib_remote_callback_dispatch(gpointer user_data) { int rc; cib_t *cib = user_data; cib_remote_opaque_t *private = cib->variant_opaque; xmlNode *msg = NULL; crm_info("Message on callback channel"); rc = pcmk__read_remote_message(&private->callback, -1); msg = pcmk__remote_message_xml(&private->callback); while (msg) { const char *type = crm_element_value(msg, F_TYPE); crm_trace("Activating %s callbacks...", type); if (safe_str_eq(type, T_CIB)) 
{ cib_native_callback(cib, msg, 0, 0); } else if (safe_str_eq(type, T_CIB_NOTIFY)) { g_list_foreach(cib->notify_list, cib_native_notify, msg); } else { crm_err("Unknown message type: %s", type); } free_xml(msg); msg = pcmk__remote_message_xml(&private->callback); } if (rc == ENOTCONN) { return -1; } return 0; } int cib_remote_signon(cib_t * cib, const char *name, enum cib_conn_type type) { int rc = pcmk_ok; cib_remote_opaque_t *private = cib->variant_opaque; if (private->passwd == NULL) { struct termios settings; rc = tcgetattr(0, &settings); if(rc == 0) { settings.c_lflag &= ~ECHO; rc = tcsetattr(0, TCSANOW, &settings); } if(rc == 0) { fprintf(stderr, "Password: "); private->passwd = calloc(1, 1024); rc = scanf("%1023s", private->passwd); fprintf(stderr, "\n"); } - /* fprintf(stderr, "entered: '%s'\n", buffer); */ if (rc < 1) { private->passwd = NULL; } settings.c_lflag |= ECHO; rc = tcsetattr(0, TCSANOW, &settings); } if (private->server == NULL || private->user == NULL) { rc = -EINVAL; } if (rc == pcmk_ok) { rc = cib_tls_signon(cib, &(private->command), FALSE); } if (rc == pcmk_ok) { rc = cib_tls_signon(cib, &(private->callback), TRUE); } if (rc == pcmk_ok) { xmlNode *hello = cib_create_op(0, private->callback.token, CRM_OP_REGISTER, NULL, NULL, NULL, 0, NULL); crm_xml_add(hello, F_CIB_CLIENTNAME, name); pcmk__remote_send_xml(&private->command, hello); free_xml(hello); } if (rc == pcmk_ok) { - crm_notice("%s: Opened connection to %s:%d", name, private->server, private->port); + crm_info("Opened connection to %s:%d for %s", + private->server, private->port, name); cib->state = cib_connected_command; cib->type = cib_command; } else { - fprintf(stderr, "%s: Connection to %s:%d failed: %s\n", - name, private->server, private->port, pcmk_strerror(rc)); + crm_info("Connection to %s:%d for %s failed: %s\n", + private->server, private->port, name, pcmk_strerror(rc)); } return rc; } int cib_remote_signoff(cib_t * cib) { int rc = pcmk_ok; /* cib_remote_opaque_t *private = cib->variant_opaque; */ crm_debug("Disconnecting from the CIB manager"); #ifdef HAVE_GNUTLS_GNUTLS_H cib_tls_close(cib); #endif cib->state = cib_disconnected; cib->type = cib_no_connection; return rc; } int cib_remote_free(cib_t * cib) { int rc = pcmk_ok; crm_warn("Freeing CIB"); if (cib->state != cib_disconnected) { rc = cib_remote_signoff(cib); if (rc == pcmk_ok) { cib_remote_opaque_t *private = cib->variant_opaque; free(private->server); free(private->user); free(private->passwd); free(cib->cmds); free(private); free(cib); } } return rc; } int cib_remote_perform_op(cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *name) { int rc; int remaining_time = 0; time_t start_time; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; cib_remote_opaque_t *private = cib->variant_opaque; if (cib->state == cib_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } cib->call_id++; /* prevent call_id from being negative (or zero) and conflicting * with the cib_errors enum * use 2 because we use it as (cib->call_id - 1) below */ if (cib->call_id < 1) { cib->call_id = 1; } op_msg = cib_create_op(cib->call_id, private->callback.token, op, host, section, data, call_options, NULL); if (op_msg == NULL) { return -EPROTO; } crm_trace("Sending %s message to the CIB manager", op); if (!(call_options & cib_sync_call)) { pcmk__remote_send_xml(&private->callback, 
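cib_remote_signon() above prompts for the password with terminal echo turned off and restores it afterwards. A self-contained sketch of that termios idiom:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

static int
read_password(char *buf, int len)
{
    struct termios settings;
    int rc;

    if (tcgetattr(STDIN_FILENO, &settings) != 0) {
        return -1;
    }
    settings.c_lflag &= ~ECHO;                    /* stop echoing keystrokes */
    tcsetattr(STDIN_FILENO, TCSANOW, &settings);

    fprintf(stderr, "Password: ");
    rc = (fgets(buf, len, stdin) == NULL) ? -1 : 0;
    fprintf(stderr, "\n");

    settings.c_lflag |= ECHO;                     /* restore echo */
    tcsetattr(STDIN_FILENO, TCSANOW, &settings);
    return rc;
}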
op_msg); } else { pcmk__remote_send_xml(&private->command, op_msg); } free_xml(op_msg); if ((call_options & cib_discard_reply)) { crm_trace("Discarding reply"); return pcmk_ok; } else if (!(call_options & cib_sync_call)) { return cib->call_id; } crm_trace("Waiting for a synchronous reply"); start_time = time(NULL); remaining_time = cib->call_timeout ? cib->call_timeout : 60; rc = pcmk_rc_ok; while (remaining_time > 0 && (rc != ENOTCONN)) { int reply_id = -1; int msg_id = cib->call_id; rc = pcmk__read_remote_message(&private->command, remaining_time * 1000); op_reply = pcmk__remote_message_xml(&private->command); if (!op_reply) { break; } crm_element_value_int(op_reply, F_CIB_CALLID, &reply_id); if (reply_id == msg_id) { break; } else if (reply_id < msg_id) { crm_debug("Received old reply: %d (wanted %d)", reply_id, msg_id); crm_log_xml_trace(op_reply, "Old reply"); } else if ((reply_id - 10000) > msg_id) { /* wrap-around case */ crm_debug("Received old reply: %d (wanted %d)", reply_id, msg_id); crm_log_xml_trace(op_reply, "Old reply"); } else { crm_err("Received a __future__ reply:" " %d (wanted %d)", reply_id, msg_id); } free_xml(op_reply); op_reply = NULL; /* wasn't the right reply, try and read some more */ remaining_time = time(NULL) - start_time; } /* if(IPC_ISRCONN(native->command_channel) == FALSE) { */ /* crm_err("The CIB manager disconnected: %d", */ /* native->command_channel->ch_status); */ /* cib->state = cib_disconnected; */ /* } */ if (rc == ENOTCONN) { crm_err("Disconnected while waiting for reply."); return -ENOTCONN; } else if (op_reply == NULL) { crm_err("No reply message - empty"); return -ENOMSG; } crm_trace("Synchronous reply received"); /* Start processing the reply... */ if (crm_element_value_int(op_reply, F_CIB_RC, &rc) != 0) { rc = -EPROTO; } if (rc == -pcmk_err_diff_resync) { /* This is an internal value that clients do not and should not care about */ rc = pcmk_ok; } if (rc == pcmk_ok || rc == -EPERM) { crm_log_xml_debug(op_reply, "passed"); } else { /* } else if(rc == -ETIME) { */ crm_err("Call failed: %s", pcmk_strerror(rc)); crm_log_xml_warn(op_reply, "failed"); } if (output_data == NULL) { /* do nothing more */ } else if (!(call_options & cib_discard_reply)) { xmlNode *tmp = get_message_xml(op_reply, F_CIB_CALLDATA); if (tmp == NULL) { crm_trace("No output in reply to \"%s\" command %d", op, cib->call_id - 1); } else { *output_data = copy_xml(tmp); } } free_xml(op_reply); return rc; } diff --git a/lib/common/cmdline.c b/lib/common/cmdline.c index e444ad1fab..5cc829a358 100644 --- a/lib/common/cmdline.c +++ b/lib/common/cmdline.c @@ -1,263 +1,267 @@ /* * Copyright 2019 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
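cib_remote_perform_op() above waits for the synchronous reply whose call id matches, dropping stale replies until the timeout runs out. A simplified, self-contained sketch of that deadline loop; the receive callback is hypothetical and stands in for pcmk__read_remote_message() plus the reply-id extraction:

#include <stdbool.h>
#include <time.h>

static bool
wait_for_reply(int wanted_id, int timeout_s,
               int (*recv_reply_id)(int timeout_ms))   /* returns reply id, or -1 */
{
    time_t deadline = time(NULL) + timeout_s;

    while (time(NULL) < deadline) {
        int remaining_ms = (int) (deadline - time(NULL)) * 1000;
        int got = recv_reply_id(remaining_ms);

        if (got < 0) {
            return false;            /* disconnected or timed out */
        } else if (got == wanted_id) {
            return true;             /* our reply arrived */
        }
        /* anything else is an old (or future) reply: discard and keep waiting */
    }
    return false;
}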
*/ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include static gboolean bump_verbosity(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { pcmk__common_args_t *common_args = (pcmk__common_args_t *) data; common_args->verbosity++; return TRUE; } pcmk__common_args_t * pcmk__new_common_args(const char *summary) { pcmk__common_args_t *args = NULL; args = calloc(1, sizeof(pcmk__common_args_t)); if (args == NULL) { crm_exit(crm_errno2exit(-ENOMEM)); } args->summary = strdup(summary); if (args->summary == NULL) { crm_exit(crm_errno2exit(-ENOMEM)); } return args; } static void free_common_args(gpointer data) { pcmk__common_args_t *common_args = (pcmk__common_args_t *) data; free(common_args->summary); free(common_args->output_ty); free(common_args->output_dest); if (common_args->output_as_descr != NULL) { free(common_args->output_as_descr); } free(common_args); } GOptionContext * pcmk__build_arg_context(pcmk__common_args_t *common_args, const char *fmts, GOptionGroup **output_group) { char *desc = crm_strdup_printf("Report bugs to %s\n", PACKAGE_BUGREPORT); GOptionContext *context; GOptionGroup *main_group; GOptionEntry main_entries[3] = { { "version", '$', 0, G_OPTION_ARG_NONE, &(common_args->version), "Display software version and exit", NULL }, { "verbose", 'V', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, bump_verbosity, "Increase debug output (may be specified multiple times)", NULL }, { NULL } }; main_group = g_option_group_new(NULL, "Application Options:", NULL, common_args, free_common_args); g_option_group_add_entries(main_group, main_entries); context = g_option_context_new(NULL); g_option_context_set_summary(context, common_args->summary); g_option_context_set_description(context, desc); g_option_context_set_main_group(context, main_group); if (fmts != NULL) { GOptionEntry output_entries[3] = { { "output-as", 0, 0, G_OPTION_ARG_STRING, &(common_args->output_ty), NULL, "FORMAT" }, { "output-to", 0, 0, G_OPTION_ARG_STRING, &(common_args->output_dest), "Specify file name for output (or \"-\" for stdout)", "DEST" }, { NULL } }; if (*output_group == NULL) { *output_group = g_option_group_new("output", "Output Options:", "Show output help", NULL, NULL); } common_args->output_as_descr = crm_strdup_printf("Specify output format as one of: %s", fmts); output_entries[0].description = common_args->output_as_descr; g_option_group_add_entries(*output_group, output_entries); g_option_context_add_group(context, *output_group); } free(desc); + // main_group is now owned by context, we don't free it here + // cppcheck-suppress memleak return context; } void pcmk__free_arg_context(GOptionContext *context) { if (context == NULL) { return; } g_option_context_free(context); } void pcmk__add_main_args(GOptionContext *context, GOptionEntry entries[]) { GOptionGroup *main_group = g_option_context_get_main_group(context); g_option_group_add_entries(main_group, entries); } void pcmk__add_arg_group(GOptionContext *context, const char *name, const char *header, const char *desc, GOptionEntry entries[]) { GOptionGroup *group = NULL; group = g_option_group_new(name, header, desc, NULL, NULL); g_option_group_add_entries(group, entries); g_option_context_add_group(context, group); + // group is now owned by context, we don't free it here + // cppcheck-suppress memleak } gchar ** pcmk__cmdline_preproc(char **argv, const char *special) { gchar **retval = NULL; GPtrArray *arr = NULL; bool saw_dash_dash = false; if (argv == NULL) { return retval; } arr = 
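pcmk__build_arg_context() assembles a GOptionContext from entry tables; a minimal stand-alone GLib program following the same pattern (names are illustrative, not from the patch):

#include <glib.h>

int
main(int argc, char **argv)
{
    gboolean verbose = FALSE;
    GError *error = NULL;
    GOptionEntry entries[] = {
        { "verbose", 'V', 0, G_OPTION_ARG_NONE, &verbose,
          "Increase debug output", NULL },
        { NULL }
    };
    GOptionContext *context = g_option_context_new(NULL);

    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("%s\n", error->message);
        g_clear_error(&error);
        g_option_context_free(context);
        return 1;
    }
    g_print("verbose=%d\n", verbose);
    g_option_context_free(context);
    return 0;
}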
g_ptr_array_new(); for (int i = 0; argv[i] != NULL; i++) { /* If this is the first time we saw "--" in the command line, set * a flag so we know to just copy everything after it over. We also * want to copy the "--" over so whatever actually parses the command * line when we're done knows where arguments end. */ if (saw_dash_dash == false && strcmp(argv[i], "--") == 0) { saw_dash_dash = true; } if (saw_dash_dash == true) { g_ptr_array_add(arr, strdup(argv[i])); continue; } /* This is just a dash by itself. That could indicate stdin/stdout, or * it could be user error. Copy it over and let glib figure it out. */ if (safe_str_eq(argv[i], "-")) { g_ptr_array_add(arr, strdup(argv[i])); continue; } /* This is a short argument, or perhaps several. Iterate over it * and explode them out into individual arguments. */ if (g_str_has_prefix(argv[i], "-") && !g_str_has_prefix(argv[i], "--")) { /* Skip over leading dash */ char *ch = argv[i]+1; while (*ch != '\0') { /* This is a special short argument that takes an option. getopt * allows values to be interspersed with a list of arguments, but * glib does not. Grab both the argument and its value and * separate them into a new argument. */ if (strchr(special, *ch) != NULL) { /* The argument does not occur at the end of this string of * arguments. Take everything through the end as its value. */ if (*(ch+1) != '\0') { g_ptr_array_add(arr, (gpointer) crm_strdup_printf("-%c", *ch)); g_ptr_array_add(arr, strdup(ch+1)); break; /* The argument occurs at the end of this string. Hopefully * whatever comes next in argv is its value. It may not be, * but that is not for us to decide. */ } else { g_ptr_array_add(arr, (gpointer) crm_strdup_printf("-%c", *ch)); ch++; } /* This is a regular short argument. Just copy it over. */ } else { g_ptr_array_add(arr, (gpointer) crm_strdup_printf("-%c", *ch)); ch++; } } /* This is a long argument, or an option, or something else. * Copy it over - everything else is copied, so this keeps it easy for * the caller to know what to do with the memory when it's done. */ } else { g_ptr_array_add(arr, strdup(argv[i])); } } /* Convert the GPtrArray into a gchar **, which the command line parsing * code knows how to deal with. Then we can free the array (but not its * contents). */ retval = calloc(arr->len+1, sizeof(char *)); for (int i = 0; i < arr->len; i++) { retval[i] = (gchar *) g_ptr_array_index(arr, i); } g_ptr_array_free(arr, FALSE); return retval; } G_GNUC_PRINTF(3, 4) gboolean pcmk__force_args(GOptionContext *context, GError **error, const char *format, ...) { int len = 0; char *buf = NULL; gchar **extra_args = NULL; va_list ap; gboolean retval = TRUE; va_start(ap, format); len = vasprintf(&buf, format, ap); CRM_ASSERT(len > 0); va_end(ap); if (!g_shell_parse_argv(buf, NULL, &extra_args, error)) { g_strfreev(extra_args); free(buf); return FALSE; } retval = g_option_context_parse_strv(context, &extra_args, error); g_strfreev(extra_args); free(buf); return retval; } diff --git a/lib/common/results.c b/lib/common/results.c index cc74cbecda..df28f857e4 100644 --- a/lib/common/results.c +++ b/lib/common/results.c @@ -1,767 +1,767 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
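pcmk__cmdline_preproc() explodes bundled short options so GLib can parse them; a hedged usage sketch, assuming "f" is listed in special as a value-taking option:

/* Hypothetical call showing the rewrite performed above, where "f" is an
 * option that takes a value:
 *
 *   input argv : "tool", "-Vq", "-fconfig.xml", "--", "-raw"
 *   result     : "tool", "-V", "-q", "-f", "config.xml", "--", "-raw"
 *
 * (everything after "--" is copied through untouched) */
gchar **processed = pcmk__cmdline_preproc(argv, "f");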
*/ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include // @COMPAT Legacy function return codes //! \deprecated Use standard return codes and pcmk_rc_name() instead const char * pcmk_errorname(int rc) { rc = abs(rc); switch (rc) { case pcmk_err_generic: return "pcmk_err_generic"; case pcmk_err_no_quorum: return "pcmk_err_no_quorum"; case pcmk_err_schema_validation: return "pcmk_err_schema_validation"; case pcmk_err_transform_failed: return "pcmk_err_transform_failed"; case pcmk_err_old_data: return "pcmk_err_old_data"; case pcmk_err_diff_failed: return "pcmk_err_diff_failed"; case pcmk_err_diff_resync: return "pcmk_err_diff_resync"; case pcmk_err_cib_modified: return "pcmk_err_cib_modified"; case pcmk_err_cib_backup: return "pcmk_err_cib_backup"; case pcmk_err_cib_save: return "pcmk_err_cib_save"; case pcmk_err_cib_corrupt: return "pcmk_err_cib_corrupt"; case pcmk_err_multiple: return "pcmk_err_multiple"; case pcmk_err_node_unknown: return "pcmk_err_node_unknown"; case pcmk_err_already: return "pcmk_err_already"; case pcmk_err_bad_nvpair: return "pcmk_err_bad_nvpair"; case pcmk_err_unknown_format: return "pcmk_err_unknown_format"; default: return pcmk_rc_name(rc); // system errno } } //! \deprecated Use standard return codes and pcmk_rc_str() instead const char * pcmk_strerror(int rc) { if (rc == 0) { return "OK"; } rc = abs(rc); // Of course rc > 0 ... unless someone passed INT_MIN as rc if ((rc > 0) && (rc < PCMK_ERROR_OFFSET)) { return strerror(rc); } switch (rc) { case pcmk_err_generic: return "Generic Pacemaker error"; case pcmk_err_no_quorum: return "Operation requires quorum"; case pcmk_err_schema_validation: return "Update does not conform to the configured schema"; case pcmk_err_transform_failed: return "Schema transform failed"; case pcmk_err_old_data: return "Update was older than existing configuration"; case pcmk_err_diff_failed: return "Application of an update diff failed"; case pcmk_err_diff_resync: return "Application of an update diff failed, requesting a full refresh"; case pcmk_err_cib_modified: return "The on-disk configuration was manually modified"; case pcmk_err_cib_backup: return "Could not archive the previous configuration"; case pcmk_err_cib_save: return "Could not save the new configuration to disk"; case pcmk_err_cib_corrupt: return "Could not parse on-disk configuration"; case pcmk_err_multiple: return "Resource active on multiple nodes"; case pcmk_err_node_unknown: return "Node not found"; case pcmk_err_already: return "Situation already as requested"; case pcmk_err_bad_nvpair: return "Bad name/value pair given"; case pcmk_err_schema_unchanged: return "Schema is already the latest available"; case pcmk_err_unknown_format: return "Unknown output format"; /* The following cases will only be hit on systems for which they are non-standard */ /* coverity[dead_error_condition] False positive on non-Linux */ case ENOTUNIQ: return "Name not unique on network"; /* coverity[dead_error_condition] False positive on non-Linux */ case ECOMM: return "Communication error on send"; /* coverity[dead_error_condition] False positive on non-Linux */ case ELIBACC: return "Can not access a needed shared library"; /* coverity[dead_error_condition] False positive on non-Linux */ case EREMOTEIO: return "Remote I/O error"; /* coverity[dead_error_condition] False positive on non-Linux */ case EUNATCH: return "Protocol driver not attached"; /* coverity[dead_error_condition] False positive on non-Linux */ case ENOKEY: 
return "Required key not available"; } crm_err("Unknown error code: %d", rc); return "Unknown error"; } // Standard Pacemaker API return codes /* This array is used only for nonzero values of pcmk_rc_e. Its values must be * kept in the exact reverse order of the enum value numbering (i.e. add new * values to the end of the array). */ static struct pcmk__rc_info { const char *name; const char *desc; int legacy_rc; } pcmk__rcs[] = { { "pcmk_rc_error", "Error", -pcmk_err_generic, }, { "pcmk_rc_unknown_format", "Unknown output format", -pcmk_err_unknown_format, }, { "pcmk_rc_bad_nvpair", "Bad name/value pair given", -pcmk_err_bad_nvpair, }, { "pcmk_rc_already", "Already in requested state", -pcmk_err_already, }, { "pcmk_rc_node_unknown", "Node not found", -pcmk_err_node_unknown, }, { "pcmk_rc_multiple", "Resource active on multiple nodes", -pcmk_err_multiple, }, { "pcmk_rc_cib_corrupt", "Could not parse on-disk configuration", -pcmk_err_cib_corrupt, }, { "pcmk_rc_cib_save", "Could not save new configuration to disk", -pcmk_err_cib_save, }, { "pcmk_rc_cib_backup", "Could not archive previous configuration", -pcmk_err_cib_backup, }, { "pcmk_rc_cib_modified", "On-disk configuration was manually modified", -pcmk_err_cib_modified, }, { "pcmk_rc_diff_resync", "Application of update diff failed, requesting full refresh", -pcmk_err_diff_resync, }, { "pcmk_rc_diff_failed", "Application of update diff failed", -pcmk_err_diff_failed, }, { "pcmk_rc_old_data", "Update was older than existing configuration", -pcmk_err_old_data, }, { "pcmk_rc_transform_failed", "Schema transform failed", -pcmk_err_transform_failed, }, { "pcmk_rc_schema_unchanged", "Schema is already the latest available", -pcmk_err_schema_unchanged, }, { "pcmk_rc_schema_validation", "Update does not conform to the configured schema", -pcmk_err_schema_validation, }, { "pcmk_rc_no_quorum", "Operation requires quorum", -pcmk_err_no_quorum, }, { "pcmk_rc_ipc_pid_only", "IPC server process is active but not accepting connections", -pcmk_err_generic, }, { "pcmk_rc_ipc_unresponsive", "IPC server is unresponsive", -pcmk_err_generic, }, { "pcmk_rc_ipc_unauthorized", "IPC server is blocked by unauthorized process", -pcmk_err_generic, }, { "pcmk_rc_op_unsatisifed", "Not applicable under current conditions", -pcmk_err_generic, }, { "pcmk_rc_undetermined", "Result undetermined", -pcmk_err_generic, }, { "pcmk_rc_before_range", "Result occurs before given range", -pcmk_err_generic, }, { "pcmk_rc_within_range", "Result occurs within given range", -pcmk_err_generic, }, { "pcmk_rc_after_range", "Result occurs after given range", -pcmk_err_generic, }, { "pcmk_rc_no_output", "Output message produced no output", -pcmk_err_generic, } }; #define PCMK__N_RC (sizeof(pcmk__rcs) / sizeof(struct pcmk__rc_info)) /*! 
* \brief Get a return code constant name as a string * * \param[in] rc Integer return code to convert * * \return String of constant name corresponding to rc */ const char * pcmk_rc_name(int rc) { if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) { return pcmk__rcs[pcmk_rc_error - rc].name; } switch (rc) { case pcmk_rc_ok: return "pcmk_rc_ok"; case E2BIG: return "E2BIG"; case EACCES: return "EACCES"; case EADDRINUSE: return "EADDRINUSE"; case EADDRNOTAVAIL: return "EADDRNOTAVAIL"; case EAFNOSUPPORT: return "EAFNOSUPPORT"; case EAGAIN: return "EAGAIN"; case EALREADY: return "EALREADY"; case EBADF: return "EBADF"; case EBADMSG: return "EBADMSG"; case EBUSY: return "EBUSY"; case ECANCELED: return "ECANCELED"; case ECHILD: return "ECHILD"; case ECOMM: return "ECOMM"; case ECONNABORTED: return "ECONNABORTED"; case ECONNREFUSED: return "ECONNREFUSED"; case ECONNRESET: return "ECONNRESET"; /* case EDEADLK: return "EDEADLK"; */ case EDESTADDRREQ: return "EDESTADDRREQ"; case EDOM: return "EDOM"; case EDQUOT: return "EDQUOT"; case EEXIST: return "EEXIST"; case EFAULT: return "EFAULT"; case EFBIG: return "EFBIG"; case EHOSTDOWN: return "EHOSTDOWN"; case EHOSTUNREACH: return "EHOSTUNREACH"; case EIDRM: return "EIDRM"; case EILSEQ: return "EILSEQ"; case EINPROGRESS: return "EINPROGRESS"; case EINTR: return "EINTR"; case EINVAL: return "EINVAL"; case EIO: return "EIO"; case EISCONN: return "EISCONN"; case EISDIR: return "EISDIR"; case ELIBACC: return "ELIBACC"; case ELOOP: return "ELOOP"; case EMFILE: return "EMFILE"; case EMLINK: return "EMLINK"; case EMSGSIZE: return "EMSGSIZE"; #ifdef EMULTIHOP // Not available on OpenBSD case EMULTIHOP: return "EMULTIHOP"; #endif case ENAMETOOLONG: return "ENAMETOOLONG"; case ENETDOWN: return "ENETDOWN"; case ENETRESET: return "ENETRESET"; case ENETUNREACH: return "ENETUNREACH"; case ENFILE: return "ENFILE"; case ENOBUFS: return "ENOBUFS"; case ENODATA: return "ENODATA"; case ENODEV: return "ENODEV"; case ENOENT: return "ENOENT"; case ENOEXEC: return "ENOEXEC"; case ENOKEY: return "ENOKEY"; case ENOLCK: return "ENOLCK"; #ifdef ENOLINK // Not available on OpenBSD case ENOLINK: return "ENOLINK"; #endif case ENOMEM: return "ENOMEM"; case ENOMSG: return "ENOMSG"; case ENOPROTOOPT: return "ENOPROTOOPT"; case ENOSPC: return "ENOSPC"; case ENOSR: return "ENOSR"; case ENOSTR: return "ENOSTR"; case ENOSYS: return "ENOSYS"; case ENOTBLK: return "ENOTBLK"; case ENOTCONN: return "ENOTCONN"; case ENOTDIR: return "ENOTDIR"; case ENOTEMPTY: return "ENOTEMPTY"; case ENOTSOCK: return "ENOTSOCK"; #if ENOTSUP != EOPNOTSUPP case ENOTSUP: return "ENOTSUP"; #endif case ENOTTY: return "ENOTTY"; case ENOTUNIQ: return "ENOTUNIQ"; case ENXIO: return "ENXIO"; case EOPNOTSUPP: return "EOPNOTSUPP"; case EOVERFLOW: return "EOVERFLOW"; case EPERM: return "EPERM"; case EPFNOSUPPORT: return "EPFNOSUPPORT"; case EPIPE: return "EPIPE"; case EPROTO: return "EPROTO"; case EPROTONOSUPPORT: return "EPROTONOSUPPORT"; case EPROTOTYPE: return "EPROTOTYPE"; case ERANGE: return "ERANGE"; case EREMOTE: return "EREMOTE"; case EREMOTEIO: return "EREMOTEIO"; case EROFS: return "EROFS"; case ESHUTDOWN: return "ESHUTDOWN"; case ESPIPE: return "ESPIPE"; case ESOCKTNOSUPPORT: return "ESOCKTNOSUPPORT"; case ESRCH: return "ESRCH"; case ESTALE: return "ESTALE"; case ETIME: return "ETIME"; case ETIMEDOUT: return "ETIMEDOUT"; case ETXTBSY: return "ETXTBSY"; case EUNATCH: return "EUNATCH"; case EUSERS: return "EUSERS"; /* case EWOULDBLOCK: return "EWOULDBLOCK"; */ case EXDEV: return "EXDEV"; #ifdef EBADE // 
Not available on OS X case EBADE: return "EBADE"; case EBADFD: return "EBADFD"; case EBADSLT: return "EBADSLT"; case EDEADLOCK: return "EDEADLOCK"; case EBADR: return "EBADR"; case EBADRQC: return "EBADRQC"; case ECHRNG: return "ECHRNG"; #ifdef EISNAM // Not available on OS X, Illumos, Solaris case EISNAM: return "EISNAM"; case EKEYEXPIRED: return "EKEYEXPIRED"; case EKEYREJECTED: return "EKEYREJECTED"; case EKEYREVOKED: return "EKEYREVOKED"; #endif case EL2HLT: return "EL2HLT"; case EL2NSYNC: return "EL2NSYNC"; case EL3HLT: return "EL3HLT"; case EL3RST: return "EL3RST"; case ELIBBAD: return "ELIBBAD"; case ELIBMAX: return "ELIBMAX"; case ELIBSCN: return "ELIBSCN"; case ELIBEXEC: return "ELIBEXEC"; #ifdef ENOMEDIUM // Not available on OS X, Illumos, Solaris case ENOMEDIUM: return "ENOMEDIUM"; case EMEDIUMTYPE: return "EMEDIUMTYPE"; #endif case ENONET: return "ENONET"; case ENOPKG: return "ENOPKG"; case EREMCHG: return "EREMCHG"; case ERESTART: return "ERESTART"; case ESTRPIPE: return "ESTRPIPE"; #ifdef EUCLEAN // Not available on OS X, Illumos, Solaris case EUCLEAN: return "EUCLEAN"; #endif case EXFULL: return "EXFULL"; #endif // EBADE default: return "Unknown"; } } /*! * \brief Get a user-friendly description of a return code * * \param[in] rc Integer return code to convert * * \return String description of rc */ const char * pcmk_rc_str(int rc) { if (rc == pcmk_rc_ok) { return "OK"; } if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) { return pcmk__rcs[pcmk_rc_error - rc].desc; } if (rc < 0) { return "Unknown error"; } return strerror(rc); } // This returns negative values for errors //! \deprecated Use standard return codes instead int pcmk_rc2legacy(int rc) { if (rc >= 0) { return -rc; // OK or system errno } if ((rc <= pcmk_rc_error) && ((pcmk_rc_error - rc) < PCMK__N_RC)) { return pcmk__rcs[pcmk_rc_error - rc].legacy_rc; } return -pcmk_err_generic; } //! 
\deprecated Use standard return codes instead int pcmk_legacy2rc(int legacy_rc) { legacy_rc = abs(legacy_rc); switch (legacy_rc) { case pcmk_err_no_quorum: return pcmk_rc_no_quorum; case pcmk_err_schema_validation: return pcmk_rc_schema_validation; case pcmk_err_schema_unchanged: return pcmk_rc_schema_unchanged; case pcmk_err_transform_failed: return pcmk_rc_transform_failed; case pcmk_err_old_data: return pcmk_rc_old_data; case pcmk_err_diff_failed: return pcmk_rc_diff_failed; case pcmk_err_diff_resync: return pcmk_rc_diff_resync; case pcmk_err_cib_modified: return pcmk_rc_cib_modified; case pcmk_err_cib_backup: return pcmk_rc_cib_backup; case pcmk_err_cib_save: return pcmk_rc_cib_save; case pcmk_err_cib_corrupt: return pcmk_rc_cib_corrupt; case pcmk_err_multiple: return pcmk_rc_multiple; case pcmk_err_node_unknown: return pcmk_rc_node_unknown; case pcmk_err_already: return pcmk_rc_already; case pcmk_err_bad_nvpair: return pcmk_rc_bad_nvpair; case pcmk_err_unknown_format: return pcmk_rc_unknown_format; case pcmk_err_generic: return pcmk_rc_error; case pcmk_ok: return pcmk_rc_ok; default: return legacy_rc; // system errno } } // Exit status codes const char * crm_exit_name(crm_exit_t exit_code) { switch (exit_code) { case CRM_EX_OK: return "CRM_EX_OK"; case CRM_EX_ERROR: return "CRM_EX_ERROR"; case CRM_EX_INVALID_PARAM: return "CRM_EX_INVALID_PARAM"; case CRM_EX_UNIMPLEMENT_FEATURE: return "CRM_EX_UNIMPLEMENT_FEATURE"; case CRM_EX_INSUFFICIENT_PRIV: return "CRM_EX_INSUFFICIENT_PRIV"; case CRM_EX_NOT_INSTALLED: return "CRM_EX_NOT_INSTALLED"; case CRM_EX_NOT_CONFIGURED: return "CRM_EX_NOT_CONFIGURED"; case CRM_EX_NOT_RUNNING: return "CRM_EX_NOT_RUNNING"; case CRM_EX_USAGE: return "CRM_EX_USAGE"; case CRM_EX_DATAERR: return "CRM_EX_DATAERR"; case CRM_EX_NOINPUT: return "CRM_EX_NOINPUT"; case CRM_EX_NOUSER: return "CRM_EX_NOUSER"; case CRM_EX_NOHOST: return "CRM_EX_NOHOST"; case CRM_EX_UNAVAILABLE: return "CRM_EX_UNAVAILABLE"; case CRM_EX_SOFTWARE: return "CRM_EX_SOFTWARE"; case CRM_EX_OSERR: return "CRM_EX_OSERR"; case CRM_EX_OSFILE: return "CRM_EX_OSFILE"; case CRM_EX_CANTCREAT: return "CRM_EX_CANTCREAT"; case CRM_EX_IOERR: return "CRM_EX_IOERR"; case CRM_EX_TEMPFAIL: return "CRM_EX_TEMPFAIL"; case CRM_EX_PROTOCOL: return "CRM_EX_PROTOCOL"; case CRM_EX_NOPERM: return "CRM_EX_NOPERM"; case CRM_EX_CONFIG: return "CRM_EX_CONFIG"; case CRM_EX_FATAL: return "CRM_EX_FATAL"; case CRM_EX_PANIC: return "CRM_EX_PANIC"; case CRM_EX_DISCONNECT: return "CRM_EX_DISCONNECT"; case CRM_EX_DIGEST: return "CRM_EX_DIGEST"; case CRM_EX_NOSUCH: return "CRM_EX_NOSUCH"; case CRM_EX_QUORUM: return "CRM_EX_QUORUM"; case CRM_EX_UNSAFE: return "CRM_EX_UNSAFE"; case CRM_EX_EXISTS: return "CRM_EX_EXISTS"; case CRM_EX_MULTIPLE: return "CRM_EX_MULTIPLE"; case CRM_EX_EXPIRED: return "CRM_EX_EXPIRED"; case CRM_EX_NOT_YET_IN_EFFECT: return "CRM_EX_NOT_YET_IN_EFFECT"; case CRM_EX_INDETERMINATE: return "CRM_EX_INDETERMINATE"; case CRM_EX_UNSATISFIED: return "CRM_EX_UNSATISFIED"; case CRM_EX_OLD: return "CRM_EX_OLD"; case CRM_EX_TIMEOUT: return "CRM_EX_TIMEOUT"; case CRM_EX_MAX: return "CRM_EX_UNKNOWN"; } return "CRM_EX_UNKNOWN"; } const char * crm_exit_str(crm_exit_t exit_code) { switch (exit_code) { case CRM_EX_OK: return "OK"; case CRM_EX_ERROR: return "Error occurred"; case CRM_EX_INVALID_PARAM: return "Invalid parameter"; case CRM_EX_UNIMPLEMENT_FEATURE: return "Unimplemented"; case CRM_EX_INSUFFICIENT_PRIV: return "Insufficient privileges"; case CRM_EX_NOT_INSTALLED: return "Not installed"; case CRM_EX_NOT_CONFIGURED: 
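The two mappings above convert between standard and legacy return codes; expected values, per the tables in this file (the usual return-code declarations are assumed to be in scope):

int legacy = pcmk_rc2legacy(pcmk_rc_no_quorum);   /* -pcmk_err_no_quorum */
int rc     = pcmk_legacy2rc(legacy);              /* pcmk_rc_no_quorum again */
int sys    = pcmk_legacy2rc(-ENOMEM);             /* plain errno passes through */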
return "Not configured"; case CRM_EX_NOT_RUNNING: return "Not running"; case CRM_EX_USAGE: return "Incorrect usage"; case CRM_EX_DATAERR: return "Invalid data given"; case CRM_EX_NOINPUT: return "Input file not available"; case CRM_EX_NOUSER: return "User does not exist"; case CRM_EX_NOHOST: return "Host does not exist"; case CRM_EX_UNAVAILABLE: return "Necessary service unavailable"; case CRM_EX_SOFTWARE: return "Internal software bug"; case CRM_EX_OSERR: return "Operating system error occurred"; case CRM_EX_OSFILE: return "System file not available"; case CRM_EX_CANTCREAT: return "Cannot create output file"; case CRM_EX_IOERR: return "I/O error occurred"; case CRM_EX_TEMPFAIL: return "Temporary failure, try again"; case CRM_EX_PROTOCOL: return "Protocol violated"; case CRM_EX_NOPERM: return "Insufficient privileges"; case CRM_EX_CONFIG: return "Invalid configuration"; case CRM_EX_FATAL: return "Fatal error occurred, will not respawn"; case CRM_EX_PANIC: return "System panic required"; case CRM_EX_DISCONNECT: return "Not connected"; case CRM_EX_DIGEST: return "Digest mismatch"; case CRM_EX_NOSUCH: return "No such object"; case CRM_EX_QUORUM: return "Quorum required"; case CRM_EX_UNSAFE: return "Operation not safe"; case CRM_EX_EXISTS: return "Requested item already exists"; case CRM_EX_MULTIPLE: return "Multiple items match request"; case CRM_EX_EXPIRED: return "Requested item has expired"; case CRM_EX_NOT_YET_IN_EFFECT: return "Requested item is not yet in effect"; case CRM_EX_INDETERMINATE: return "Could not determine status"; case CRM_EX_UNSATISFIED: return "Not applicable under current conditions"; case CRM_EX_OLD: return "Update was older than existing configuration"; case CRM_EX_TIMEOUT: return "Timeout occurred"; case CRM_EX_MAX: return "Error occurred"; } if ((exit_code > 128) && (exit_code < CRM_EX_MAX)) { return "Interrupted by signal"; } return "Unknown exit status"; } //! \deprecated Use standard return codes and pcmk_rc2exitc() instead crm_exit_t crm_errno2exit(int rc) { rc = abs(rc); // Convenience for functions that return -errno switch (rc) { case pcmk_ok: return CRM_EX_OK; case pcmk_err_no_quorum: return CRM_EX_QUORUM; case pcmk_err_old_data: return CRM_EX_OLD; case pcmk_err_schema_validation: case pcmk_err_transform_failed: return CRM_EX_CONFIG; case pcmk_err_bad_nvpair: return CRM_EX_INVALID_PARAM; case pcmk_err_already: return CRM_EX_EXISTS; case pcmk_err_multiple: return CRM_EX_MULTIPLE; case pcmk_err_node_unknown: case pcmk_err_unknown_format: return CRM_EX_NOSUCH; default: return pcmk_rc2exitc(rc); // system errno } } /*! 
* \brief Map a function return code to the most similar exit code * * \param[in] rc Function return code * * \return Most similar exit code */ crm_exit_t pcmk_rc2exitc(int rc) { switch (rc) { case pcmk_rc_ok: return CRM_EX_OK; case pcmk_rc_no_quorum: return CRM_EX_QUORUM; case pcmk_rc_old_data: return CRM_EX_OLD; case pcmk_rc_schema_validation: case pcmk_rc_transform_failed: return CRM_EX_CONFIG; case pcmk_rc_bad_nvpair: return CRM_EX_INVALID_PARAM; case EACCES: return CRM_EX_INSUFFICIENT_PRIV; case EBADF: case EINVAL: case EFAULT: case ENOSYS: case EOVERFLOW: return CRM_EX_SOFTWARE; case EBADMSG: case EMSGSIZE: case ENOMSG: case ENOPROTOOPT: case EPROTO: case EPROTONOSUPPORT: case EPROTOTYPE: return CRM_EX_PROTOCOL; case ECOMM: case ENOMEM: return CRM_EX_OSERR; case ECONNABORTED: case ECONNREFUSED: case ECONNRESET: case ENOTCONN: return CRM_EX_DISCONNECT; case EEXIST: case pcmk_rc_already: return CRM_EX_EXISTS; case EIO: case pcmk_rc_no_output: return CRM_EX_IOERR; case ENOTSUP: #if EOPNOTSUPP != ENOTSUP case EOPNOTSUPP: #endif return CRM_EX_UNIMPLEMENT_FEATURE; case ENOTUNIQ: case pcmk_rc_multiple: return CRM_EX_MULTIPLE; case ENXIO: case pcmk_rc_node_unknown: case pcmk_rc_unknown_format: return CRM_EX_NOSUCH; case ETIME: case ETIMEDOUT: return CRM_EX_TIMEOUT; case EAGAIN: case EBUSY: return CRM_EX_UNSATISFIED; case pcmk_rc_before_range: return CRM_EX_NOT_YET_IN_EFFECT; case pcmk_rc_after_range: return CRM_EX_EXPIRED; case pcmk_rc_undetermined: return CRM_EX_INDETERMINATE; case pcmk_rc_op_unsatisfied: return CRM_EX_UNSATISFIED; case pcmk_rc_within_range: return CRM_EX_OK; default: return CRM_EX_ERROR; } } // Other functions const char * bz2_strerror(int rc) { // See ftp://sources.redhat.com/pub/bzip2/docs/manual_3.html#SEC17 switch (rc) { case BZ_OK: case BZ_RUN_OK: case BZ_FLUSH_OK: case BZ_FINISH_OK: case BZ_STREAM_END: return "Ok"; case BZ_CONFIG_ERROR: return "libbz2 has been improperly compiled on your platform"; case BZ_SEQUENCE_ERROR: return "library functions called in the wrong order"; case BZ_PARAM_ERROR: return "parameter is out of range or otherwise incorrect"; case BZ_MEM_ERROR: return "memory allocation failed"; case BZ_DATA_ERROR: return "data integrity error is detected during decompression"; case BZ_DATA_ERROR_MAGIC: return "the compressed stream does not start with the correct magic bytes"; case BZ_IO_ERROR: return "error reading or writing in the compressed file"; case BZ_UNEXPECTED_EOF: return "compressed file finishes before the logical end of stream is detected"; case BZ_OUTBUFF_FULL: return "output data will not fit into the buffer provided"; } return "Unknown error"; } crm_exit_t crm_exit(crm_exit_t rc) { /* A compiler could theoretically use any type for crm_exit_t, but an int * should always hold it, so cast to int to keep static analysis happy. */ if ((((int) rc) < 0) || (((int) rc) > CRM_EX_MAX)) { rc = CRM_EX_ERROR; } mainloop_cleanup(); crm_xml_cleanup(); - qb_log_fini(); pcmk__cli_option_cleanup(); if (crm_system_name) { crm_info("Exiting %s " CRM_XS " with status %d", crm_system_name, rc); free(crm_system_name); } else { crm_trace("Exiting with status %d", rc); } + qb_log_fini(); // Don't log anything after this point exit(rc); } diff --git a/lib/common/strings.c b/lib/common/strings.c index 9bda336a47..cae60345ae 100644 --- a/lib/common/strings.c +++ b/lib/common/strings.c @@ -1,657 +1,657 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. 
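pcmk_rc2exitc() is typically the last step before exiting; for example:

int rc = pcmk_rc_no_quorum;          /* some standard Pacemaker return code */
crm_exit(pcmk_rc2exitc(rc));         /* exits with CRM_EX_QUORUM here */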
* * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include char * crm_itoa_stack(int an_int, char *buffer, size_t len) { if (buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } /*! * \internal * \brief Scan a long long integer from a string * * \param[in] text String to scan * \param[out] result If not NULL, where to store scanned value * \param[out] end_text If not NULL, where to store pointer to just after value * * \return Standard Pacemaker return code (also set errno on error) */ static int scan_ll(const char *text, long long *result, char **end_text) { long long local_result = -1; char *local_end_text = NULL; int rc = pcmk_rc_ok; errno = 0; if (text != NULL) { #ifdef ANSI_ONLY local_result = (long long) strtol(text, &local_end_text, 10); #else local_result = strtoll(text, &local_end_text, 10); #endif if (errno == ERANGE) { rc = errno; crm_warn("Integer parsed from %s was clipped to %lld", text, local_result); } else if (errno != 0) { rc = errno; local_result = -1; crm_err("Could not parse integer from %s (using -1 instead): %s", text, pcmk_rc_str(rc)); } else if (local_end_text == text) { rc = EINVAL; local_result = -1; crm_err("Could not parse integer from %s (using -1 instead): " "No digits found", text); } if ((end_text == NULL) && (local_end_text != NULL) && (local_end_text[0] != '\0')) { crm_warn("Characters left over after parsing '%s': '%s'", text, local_end_text); } errno = rc; } if (end_text != NULL) { *end_text = local_end_text; } if (result != NULL) { *result = local_result; } return rc; } /*! * \brief Parse a long long integer value from a string * * \param[in] text The string to parse * \param[in] default_text Default string to parse if text is NULL * * \return Parsed value on success, -1 (and set errno) on error */ long long crm_parse_ll(const char *text, const char *default_text) { long long result; if (text == NULL) { text = default_text; if (text == NULL) { crm_err("No default conversion value supplied"); errno = EINVAL; return -1; } } scan_ll(text, &result, NULL); return result; } /*! * \brief Parse an integer value from a string * * \param[in] text The string to parse * \param[in] default_text Default string to parse if text is NULL * * \return Parsed value on success, INT_MIN or INT_MAX (and set errno to ERANGE) * if parsed value is out of integer range, otherwise -1 (and set errno) */ int crm_parse_int(const char *text, const char *default_text) { long long result = crm_parse_ll(text, default_text); if (result < INT_MIN) { // If errno is ERANGE, crm_parse_ll() has already logged a message if (errno != ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); errno = ERANGE; } return INT_MIN; } else if (result > INT_MAX) { // If errno is ERANGE, crm_parse_ll() has already logged a message if (errno != ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); errno = ERANGE; } return INT_MAX; } return (int) result; } /*! 
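crm_parse_int() clips out-of-range values and reports them through errno; for example:

errno = 0;
int v = crm_parse_int("9999999999", NULL);   /* larger than INT_MAX */
/* v == INT_MAX and errno == ERANGE; in-range strings parse normally */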
* \internal * \brief Parse a guint from a string stored in a hash table * * \param[in] table Hash table to search * \param[in] key Hash table key to use to retrieve string * \param[in] default_val What to use if key has no entry in table * \param[out] result If not NULL, where to store parsed integer * * \return Standard Pacemaker return code */ int pcmk__guint_from_hash(GHashTable *table, const char *key, guint default_val, guint *result) { const char *value; long long value_ll; CRM_CHECK((table != NULL) && (key != NULL), return EINVAL); value = g_hash_table_lookup(table, key); if (value == NULL) { if (result != NULL) { *result = default_val; } return pcmk_rc_ok; } errno = 0; value_ll = crm_parse_ll(value, NULL); if (errno != 0) { return errno; // Message already logged } if ((value_ll < 0) || (value_ll > G_MAXUINT)) { crm_warn("Could not parse non-negative integer from %s", value); return ERANGE; } if (result != NULL) { *result = (guint) value_ll; } return pcmk_rc_ok; } #ifndef NUMCHARS # define NUMCHARS "0123456789." #endif #ifndef WHITESPACE # define WHITESPACE " \t\n\r\f" #endif /*! * \brief Parse a time+units string and return milliseconds equivalent * * \param[in] input String with a number and units (optionally with whitespace * before and/or after the number) * * \return Milliseconds corresponding to string expression, or -1 on error */ long long crm_get_msec(const char *input) { const char *num_start = NULL; const char *units; long long multiplier = 1000; long long divisor = 1; long long msec = -1; size_t num_len = 0; char *end_text = NULL; if (input == NULL) { return -1; } num_start = input + strspn(input, WHITESPACE); num_len = strspn(num_start, NUMCHARS); if (num_len < 1) { return -1; } units = num_start + num_len; units += strspn(units, WHITESPACE); if (!strncasecmp(units, "ms", 2) || !strncasecmp(units, "msec", 4)) { multiplier = 1; divisor = 1; } else if (!strncasecmp(units, "us", 2) || !strncasecmp(units, "usec", 4)) { multiplier = 1; divisor = 1000; } else if (!strncasecmp(units, "s", 1) || !strncasecmp(units, "sec", 3)) { multiplier = 1000; divisor = 1; } else if (!strncasecmp(units, "m", 1) || !strncasecmp(units, "min", 3)) { multiplier = 60 * 1000; divisor = 1; } else if (!strncasecmp(units, "h", 1) || !strncasecmp(units, "hr", 2)) { multiplier = 60 * 60 * 1000; divisor = 1; } else if ((*units != EOS) && (*units != '\n') && (*units != '\r')) { return -1; } scan_ll(num_start, &msec, &end_text); if (msec > (LLONG_MAX / multiplier)) { // Arithmetics overflow while multiplier/divisor mutually exclusive return LLONG_MAX; } msec *= multiplier; msec /= divisor; return msec; } gboolean safe_str_neq(const char *a, const char *b) { if (a == b) { return FALSE; } else if (a == NULL || b == NULL) { return TRUE; } else if (strcasecmp(a, b) == 0) { return FALSE; } return TRUE; } gboolean crm_is_true(const char *s) { gboolean ret = FALSE; if (s != NULL) { crm_str_to_boolean(s, &ret); } return ret; } int crm_str_to_boolean(const char *s, int *ret) { if (s == NULL) { return -1; } else if (strcasecmp(s, "true") == 0 || strcasecmp(s, "on") == 0 || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) { *ret = TRUE; return 1; } else if (strcasecmp(s, "false") == 0 || strcasecmp(s, "off") == 0 || strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0) { *ret = FALSE; return 1; } return -1; } char * crm_strip_trailing_newline(char *str) { int len; if (str == NULL) { return str; } for (len = strlen(str) - 1; len >= 0 && str[len] == '\n'; 
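A few conversions crm_get_msec() performs, based on the multipliers shown above (bare numbers are treated as seconds):

crm_get_msec("500ms");   /* ->     500 */
crm_get_msec("10");      /* ->   10000 */
crm_get_msec("2min");    /* ->  120000 */
crm_get_msec("1h");      /* -> 3600000 */
crm_get_msec("bogus");   /* ->      -1 */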
len--) { str[len] = '\0'; } return str; } gboolean crm_str_eq(const char *a, const char *b, gboolean use_case) { if (use_case) { return g_strcmp0(a, b) == 0; /* TODO - Figure out which calls, if any, really need to be case independent */ } else if (a == b) { return TRUE; } else if (a == NULL || b == NULL) { /* shouldn't be comparing NULLs */ return FALSE; } else if (strcasecmp(a, b) == 0) { return TRUE; } return FALSE; } /*! * \brief Check whether a string starts with a certain sequence * * \param[in] str String to check - * \param[in] match Sequence to match against beginning of \p str + * \param[in] prefix Sequence to match against beginning of \p str * * \return \c true if \p str begins with match, \c false otherwise * \note This is equivalent to !strncmp(s, prefix, strlen(prefix)) * but is likely less efficient when prefix is a string literal * if the compiler optimizes away the strlen() at compile time, * and more efficient otherwise. */ bool pcmk__starts_with(const char *str, const char *prefix) { const char *s = str; const char *p = prefix; if (!s || !p) { return false; } while (*s && *p) { if (*s++ != *p++) { return false; } } return (*p == 0); } static inline bool ends_with(const char *s, const char *match, bool as_extension) { if (pcmk__str_empty(match)) { return true; } else if (s == NULL) { return false; } else { size_t slen, mlen; /* Besides as_extension, we could also check !strchr(&match[1], match[0]) but that would be inefficient. */ if (as_extension) { s = strrchr(s, match[0]); return (s == NULL)? false : !strcmp(s, match); } mlen = strlen(match); slen = strlen(s); return ((slen >= mlen) && !strcmp(s + slen - mlen, match)); } } /*! * \internal * \brief Check whether a string ends with a certain sequence * * \param[in] s String to check * \param[in] match Sequence to match against end of \p s * * \return \c true if \p s ends case-sensitively with match, \c false otherwise * \note pcmk__ends_with_ext() can be used if the first character of match * does not recur in match. */ bool pcmk__ends_with(const char *s, const char *match) { return ends_with(s, match, false); } /*! * \internal * \brief Check whether a string ends with a certain "extension" * * \param[in] s String to check * \param[in] match Extension to match against end of \p s, that is, * its first character must not occur anywhere * in the rest of that very sequence (example: file * extension where the last dot is its delimiter, * e.g., ".html"); incorrect results may be * returned otherwise. * * \return \c true if \p s ends (verbatim, i.e., case sensitively) * with "extension" designated as \p match (including empty * string), \c false otherwise * * \note Main incentive to prefer this function over \c pcmk__ends_with() * where possible is the efficiency (at the cost of added * restriction on \p match as stated; the complexity class * remains the same, though: BigO(M+N) vs. BigO(M+2N)). */ bool pcmk__ends_with_ext(const char *s, const char *match) { return ends_with(s, match, true); } /* * This re-implements g_str_hash as it was prior to glib2-2.28: * * https://gitlab.gnome.org/GNOME/glib/commit/354d655ba8a54b754cb5a3efb42767327775696c * * Note that the new g_str_hash is presumably a *better* hash (it's actually * a correct implementation of DJB's hash), but we need to preserve existing * behaviour, because the hash key ultimately determines the "sort" order * when iterating through GHashTables, which affects allocation of scores to * clone instances when iterating through rsc->allowed_nodes. 
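Expected results of the suffix helpers above; pcmk__ends_with_ext() is the cheaper variant when the suffix's first character cannot recur, as with a file extension:

pcmk__ends_with_ext("index.html", ".html");       /* true  */
pcmk__ends_with_ext("index.html.bak", ".html");   /* false */
pcmk__ends_with("pacemaker-based", "-based");     /* true  */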
It (somehow) * also appears to have some minor impact on the ordering of a few * pseudo_event IDs in the transition graph. */ guint g_str_hash_traditional(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + *p; return h; } /* used with hash tables where case does not matter */ gboolean crm_strcase_equal(gconstpointer a, gconstpointer b) { return crm_str_eq((const char *) a, (const char *) b, FALSE); } guint crm_strcase_hash(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + g_ascii_tolower(*p); return h; } static void copy_str_table_entry(gpointer key, gpointer value, gpointer user_data) { if (key && value && user_data) { g_hash_table_insert((GHashTable*)user_data, strdup(key), strdup(value)); } } GHashTable * crm_str_table_dup(GHashTable *old_table) { GHashTable *new_table = NULL; if (old_table) { new_table = crm_str_table_new(); g_hash_table_foreach(old_table, copy_str_table_entry, new_table); } return new_table; } /*! * \internal * \brief Add a word to a space-separated string list * * \param[in,out] list Pointer to beginning of list * \param[in] word Word to add to list * * \return (Potentially new) beginning of list * \note This dynamically reallocates list as needed. */ char * pcmk__add_word(char *list, const char *word) { if (word != NULL) { size_t len = list? strlen(list) : 0; list = realloc_safe(list, len + strlen(word) + 2); // 2 = space + EOS sprintf(list + len, " %s", word); } return list; } /*! * \internal * \brief Compress data * * \param[in] data Data to compress * \param[in] length Number of characters of data to compress * \param[in] max Maximum size of compressed data (or 0 to estimate) * \param[out] result Where to store newly allocated compressed result * \param[out] result_len Where to store actual compressed length of result * * \return Standard Pacemaker return code */ int pcmk__compress(const char *data, unsigned int length, unsigned int max, char **result, unsigned int *result_len) { int rc; char *compressed = NULL; char *uncompressed = strdup(data); #ifdef CLOCK_MONOTONIC struct timespec after_t; struct timespec before_t; #endif if (max == 0) { max = (length * 1.01) + 601; // Size guaranteed to hold result } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &before_t); #endif compressed = calloc((size_t) max, sizeof(char)); CRM_ASSERT(compressed); *result_len = max; rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length, CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK); free(uncompressed); if (rc != BZ_OK) { crm_err("Compression of %d bytes failed: %s " CRM_XS " bzerror=%d", length, bz2_strerror(rc), rc); free(compressed); return pcmk_rc_error; } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &after_t); crm_trace("Compressed %d bytes into %d (ratio %d:1) in %.0fms", length, *result_len, length / (*result_len), (after_t.tv_sec - before_t.tv_sec) * 1000 + (after_t.tv_nsec - before_t.tv_nsec) / 1e6); #else crm_trace("Compressed %d bytes into %d (ratio %d:1)", length, *result_len, length / (*result_len)); #endif *result = compressed; return pcmk_rc_ok; } char * crm_strdup_printf(char const *format, ...) 
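The traditional hash above computes h = h*31 + c per character, since (h << 5) - h equals 31*h; an equivalent stand-alone formulation for comparison (not from the patch):

#include <glib.h>

/* Equivalent to g_str_hash_traditional() above: (h << 5) - h == h * 31 */
static guint
traditional_hash(const char *s)
{
    guint32 h = 0;

    for (const signed char *p = (const signed char *) s; *p != '\0'; p++) {
        h = (h * 31) + *p;
    }
    return h;
}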
{ va_list ap; int len = 0; char *string = NULL; va_start(ap, format); len = vasprintf (&string, format, ap); CRM_ASSERT(len > 0); va_end(ap); return string; } int pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end) { char *remainder = NULL; CRM_ASSERT(start != NULL && end != NULL); *start = -1; *end = -1; crm_trace("Attempting to decode: [%s]", srcstring); if (srcstring == NULL || strcmp(srcstring, "") == 0 || strcmp(srcstring, "-") == 0) { return pcmk_rc_unknown_format; } /* String starts with a dash, so this is either a range with * no beginning or garbage. * */ if (*srcstring == '-') { int rc = scan_ll(srcstring+1, end, &remainder); if (rc != pcmk_rc_ok || *remainder != '\0') { return pcmk_rc_unknown_format; } else { return pcmk_rc_ok; } } if (scan_ll(srcstring, start, &remainder) != pcmk_rc_ok) { return pcmk_rc_unknown_format; } if (*remainder && *remainder == '-') { if (*(remainder+1)) { char *more_remainder = NULL; int rc = scan_ll(remainder+1, end, &more_remainder); if (rc != pcmk_rc_ok || *more_remainder != '\0') { return pcmk_rc_unknown_format; } } } else if (*remainder && *remainder != '-') { *start = -1; return pcmk_rc_unknown_format; } else { /* The input string contained only one number. Set start and end * to the same value and return pcmk_rc_ok. This gives the caller * a way to tell this condition apart from a range with no end. */ *end = *start; } return pcmk_rc_ok; } diff --git a/lib/common/utils.c b/lib/common/utils.c index dc358c34df..13e7cb2b29 100644 --- a/lib/common/utils.c +++ b/lib/common/utils.c @@ -1,661 +1,627 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
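pcmk__parse_ll_range() accepts "N", "N-M", "N-" and "-M"; expected results for a few inputs:

long long start, end;

pcmk__parse_ll_range("3-7", &start, &end);   /* ok: start=3,  end=7          */
pcmk__parse_ll_range("5",   &start, &end);   /* ok: start=5,  end=5          */
pcmk__parse_ll_range("5-",  &start, &end);   /* ok: start=5,  end=-1 (open)  */
pcmk__parse_ll_range("-9",  &start, &end);   /* ok: start=-1, end=9  (open)  */
pcmk__parse_ll_range("-",   &start, &end);   /* pcmk_rc_unknown_format       */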
*/ #include -#include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#ifndef MAXLINE -# define MAXLINE 512 -#endif - #ifndef PW_BUFFER_LEN # define PW_BUFFER_LEN 500 #endif CRM_TRACE_INIT_DATA(common); gboolean crm_config_error = FALSE; gboolean crm_config_warning = FALSE; char *crm_system_name = NULL; -int node_score_red = 0; -int node_score_green = 0; -int node_score_yellow = 0; +int pcmk__score_red = 0; +int pcmk__score_green = 0; +int pcmk__score_yellow = 0; int char2score(const char *score) { int score_f = 0; if (score == NULL) { } else if (pcmk_str_is_minus_infinity(score)) { score_f = -CRM_SCORE_INFINITY; } else if (pcmk_str_is_infinity(score)) { score_f = CRM_SCORE_INFINITY; } else if (safe_str_eq(score, "red")) { - score_f = node_score_red; + score_f = pcmk__score_red; } else if (safe_str_eq(score, "yellow")) { - score_f = node_score_yellow; + score_f = pcmk__score_yellow; } else if (safe_str_eq(score, "green")) { - score_f = node_score_green; + score_f = pcmk__score_green; } else { score_f = crm_parse_int(score, NULL); if (score_f > 0 && score_f > CRM_SCORE_INFINITY) { score_f = CRM_SCORE_INFINITY; } else if (score_f < 0 && score_f < -CRM_SCORE_INFINITY) { score_f = -CRM_SCORE_INFINITY; } } return score_f; } char * score2char_stack(int score, char *buf, size_t len) { if (score >= CRM_SCORE_INFINITY) { strncpy(buf, CRM_INFINITY_S, 9); } else if (score <= -CRM_SCORE_INFINITY) { strncpy(buf, CRM_MINUS_INFINITY_S , 10); } else { return crm_itoa_stack(score, buf, len); } return buf; } char * score2char(int score) { if (score >= CRM_SCORE_INFINITY) { return strdup(CRM_INFINITY_S); } else if (score <= -CRM_SCORE_INFINITY) { return strdup(CRM_MINUS_INFINITY_S); } return crm_itoa(score); } char * generate_hash_key(const char *crm_msg_reference, const char *sys) { char *hash_key = crm_strdup_printf("%s_%s", (sys? sys : "none"), crm_msg_reference); crm_trace("created hash key: (%s)", hash_key); return hash_key; } int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid) { int rc = pcmk_ok; char *buffer = NULL; struct passwd pwd; struct passwd *pwentry = NULL; buffer = calloc(1, PW_BUFFER_LEN); if (buffer == NULL) { return -ENOMEM; } rc = getpwnam_r(name, &pwd, buffer, PW_BUFFER_LEN, &pwentry); if (pwentry) { if (uid) { *uid = pwentry->pw_uid; } if (gid) { *gid = pwentry->pw_gid; } crm_trace("User %s has uid=%d gid=%d", name, pwentry->pw_uid, pwentry->pw_gid); } else { rc = rc? -rc : -EINVAL; crm_info("User %s lookup: %s", name, pcmk_strerror(rc)); } free(buffer); return rc; } /*! 
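char2score() maps symbolic scores and clamps numeric ones; assuming the usual CRM_SCORE_INFINITY of 1000000:

char2score("INFINITY");    /* ->  CRM_SCORE_INFINITY (1000000) */
char2score("-INFINITY");   /* -> -CRM_SCORE_INFINITY */
char2score("red");         /* ->  pcmk__score_red */
char2score("2000000");     /* ->  CRM_SCORE_INFINITY (clamped) */
char2score("50");          /* ->  50 */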
* \brief Get user and group IDs of pacemaker daemon user * * \param[out] uid If non-NULL, where to store daemon user ID * \param[out] gid If non-NULL, where to store daemon group ID * * \return pcmk_ok on success, -errno otherwise */ int pcmk_daemon_user(uid_t *uid, gid_t *gid) { static uid_t daemon_uid; static gid_t daemon_gid; static bool found = false; int rc = pcmk_err_generic; if (!found) { rc = crm_user_lookup(CRM_DAEMON_USER, &daemon_uid, &daemon_gid); if (rc == pcmk_ok) { found = true; } } if (found) { if (uid) { *uid = daemon_uid; } if (gid) { *gid = daemon_gid; } } return rc; } static int crm_version_helper(const char *text, const char **end_text) { int atoi_result = -1; CRM_ASSERT(end_text != NULL); errno = 0; if (text != NULL && text[0] != 0) { /* seemingly sacrificing const-correctness -- because while strtol doesn't modify the input, it doesn't want to artificially taint the "end_text" pointer-to-pointer-to-first-char-in-string with constness in case the input wasn't actually constant -- by semantic definition not a single character will get modified so it shall be perfectly safe to make compiler happy with dropping "const" qualifier here */ atoi_result = (int) strtol(text, (char **) end_text, 10); if (errno == EINVAL) { crm_err("Conversion of '%s' %c failed", text, text[0]); atoi_result = -1; } } return atoi_result; } /* * version1 < version2 : -1 * version1 = version2 : 0 * version1 > version2 : 1 */ int compare_version(const char *version1, const char *version2) { int rc = 0; int lpc = 0; const char *ver1_iter, *ver2_iter; if (version1 == NULL && version2 == NULL) { return 0; } else if (version1 == NULL) { return -1; } else if (version2 == NULL) { return 1; } ver1_iter = version1; ver2_iter = version2; while (1) { int digit1 = 0; int digit2 = 0; lpc++; if (ver1_iter == ver2_iter) { break; } if (ver1_iter != NULL) { digit1 = crm_version_helper(ver1_iter, &ver1_iter); } if (ver2_iter != NULL) { digit2 = crm_version_helper(ver2_iter, &ver2_iter); } if (digit1 < digit2) { rc = -1; break; } else if (digit1 > digit2) { rc = 1; break; } if (ver1_iter != NULL && *ver1_iter == '.') { ver1_iter++; } if (ver1_iter != NULL && *ver1_iter == '\0') { ver1_iter = NULL; } if (ver2_iter != NULL && *ver2_iter == '.') { ver2_iter++; } if (ver2_iter != NULL && *ver2_iter == 0) { ver2_iter = NULL; } } if (rc == 0) { crm_trace("%s == %s (%d)", version1, version2, lpc); } else if (rc < 0) { crm_trace("%s < %s (%d)", version1, version2, lpc); } else if (rc > 0) { crm_trace("%s > %s (%d)", version1, version2, lpc); } return rc; } /*! * \brief Parse milliseconds from a Pacemaker interval specification * * \param[in] input Pacemaker time interval specification (a bare number of * seconds, a number with a unit optionally with whitespace * before and/or after the number, or an ISO 8601 duration) * * \return Milliseconds equivalent of given specification on success (limited * to the range of an unsigned integer), 0 if input is NULL, * or 0 (and set errno to EINVAL) on error */ guint crm_parse_interval_spec(const char *input) { long long msec = -1; errno = 0; if (input == NULL) { return 0; } else if (input[0] == 'P') { crm_time_t *period_s = crm_time_parse_duration(input); if (period_s) { msec = 1000 * crm_time_get_seconds(period_s); crm_time_free(period_s); } } else { msec = crm_get_msec(input); } if (msec < 0) { crm_warn("Using 0 instead of '%s'", input); errno = EINVAL; return 0; } return (msec >= G_MAXUINT)? 
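compare_version() compares dot-separated components numerically rather than lexically; for example:

compare_version("1.9", "1.10");      /* -> -1 (9 < 10 component-wise) */
compare_version("2.0.3", "2.0.3");   /* ->  0 */
compare_version("2.1", "2.0.5");     /* ->  1 */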
G_MAXUINT : (guint) msec; } extern bool crm_is_daemon; /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *assert_condition, gboolean do_core, gboolean do_fork) { int rc = 0; int pid = 0; int status = 0; /* Implied by the parent's error logging below */ /* crm_write_blackbox(0); */ if(crm_is_daemon == FALSE) { /* This is a command line tool - do not fork */ /* crm_add_logfile(NULL); * Record it to a file? */ crm_enable_stderr(TRUE); /* Make sure stderr is enabled so we can tell the caller */ do_fork = FALSE; /* Just crash if needed */ } if (do_core == FALSE) { crm_err("%s: Triggered assert at %s:%d : %s", function, file, line, assert_condition); return; } else if (do_fork) { pid = fork(); } else { crm_err("%s: Triggered fatal assert at %s:%d : %s", function, file, line, assert_condition); } if (pid == -1) { crm_crit("%s: Cannot create core for non-fatal assert at %s:%d : %s", function, file, line, assert_condition); return; } else if(pid == 0) { /* Child process */ abort(); return; } /* Parent process */ crm_err("%s: Forked child %d to record non-fatal assert at %s:%d : %s", function, pid, file, line, assert_condition); crm_write_blackbox(SIGTRAP, NULL); do { rc = waitpid(pid, &status, 0); if(rc == pid) { return; /* Job done */ } } while(errno == EINTR); if (errno == ECHILD) { /* crm_mon does this */ crm_trace("Cannot wait on forked child %d - SIGCHLD is probably set to SIG_IGN", pid); return; } crm_perror(LOG_ERR, "Cannot wait on forked child %d", pid); } void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile) { int rc; pid_t pid; - const char *devnull = "/dev/null"; if (daemonize == FALSE) { return; } /* Check before we even try... */ rc = pcmk__pidfile_matches(pidfile, 1, name, &pid); if ((rc != pcmk_rc_ok) && (rc != ENOENT)) { crm_err("%s: already running [pid %lld in %s]", name, (long long) pid, pidfile); printf("%s: already running [pid %lld in %s]\n", name, (long long) pid, pidfile); crm_exit(CRM_EX_ERROR); } pid = fork(); if (pid < 0) { fprintf(stderr, "%s: could not start daemon\n", name); crm_perror(LOG_ERR, "fork"); crm_exit(CRM_EX_OSERR); } else if (pid > 0) { crm_exit(CRM_EX_OK); } rc = pcmk__lock_pidfile(pidfile, name); if (rc != pcmk_rc_ok) { crm_err("Could not lock '%s' for %s: %s " CRM_XS " rc=%d", pidfile, name, pcmk_rc_str(rc), rc); printf("Could not lock '%s' for %s: %s (%d)\n", pidfile, name, pcmk_rc_str(rc), rc); crm_exit(CRM_EX_ERROR); } umask(S_IWGRP | S_IWOTH | S_IROTH); close(STDIN_FILENO); - (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ + pcmk__open_devnull(O_RDONLY); // stdin (fd 0) + close(STDOUT_FILENO); - (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ + pcmk__open_devnull(O_WRONLY); // stdout (fd 1) + close(STDERR_FILENO); - (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ + pcmk__open_devnull(O_WRONLY); // stderr (fd 2) } char * crm_meta_name(const char *field) { int lpc = 0; int max = 0; char *crm_name = NULL; CRM_CHECK(field != NULL, return NULL); crm_name = crm_strdup_printf(CRM_META "_%s", field); /* Massage the names so they can be used as shell variables */ max = strlen(crm_name); for (; lpc < max; lpc++) { switch (crm_name[lpc]) { case '-': crm_name[lpc] = '_'; break; } } return crm_name; } const char * crm_meta_value(GHashTable * hash, const char *field) { char *key = NULL; const char *value = NULL; key = crm_meta_name(field); if (key) { value = g_hash_table_lookup(hash, key); free(key); } return value; } -void * -find_library_function(void **handle, const char *lib, const 
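The pcmk__open_devnull() calls above replace open-coded redirection of the standard streams; the underlying idea in plain POSIX (a sketch, not the patch's helper):

#include <fcntl.h>
#include <unistd.h>

/* Point the standard streams at /dev/null; open() reuses the lowest
 * free descriptor, so each open() lands on the fd just closed. */
static void
redirect_stdio_to_devnull(void)
{
    close(STDIN_FILENO);
    (void) open("/dev/null", O_RDONLY);   /* fd 0: stdin  */
    close(STDOUT_FILENO);
    (void) open("/dev/null", O_WRONLY);   /* fd 1: stdout */
    close(STDERR_FILENO);
    (void) open("/dev/null", O_WRONLY);   /* fd 2: stderr */
}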
char *fn, gboolean fatal) -{ - char *error; - void *a_function; - - if (*handle == NULL) { - *handle = dlopen(lib, RTLD_LAZY); - } - - if (!(*handle)) { - crm_err("%sCould not open %s: %s", fatal ? "Fatal: " : "", lib, dlerror()); - if (fatal) { - crm_exit(CRM_EX_FATAL); - } - return NULL; - } - - a_function = dlsym(*handle, fn); - if (a_function == NULL) { - error = dlerror(); - crm_err("%sCould not find %s in %s: %s", fatal ? "Fatal: " : "", fn, lib, error); - if (fatal) { - crm_exit(CRM_EX_FATAL); - } - } - - return a_function; -} - #ifdef HAVE_UUID_UUID_H # include #endif char * crm_generate_uuid(void) { unsigned char uuid[16]; char *buffer = malloc(37); /* Including NUL byte */ uuid_generate(uuid); uuid_unparse(uuid, buffer); return buffer; } /*! * \brief Get name to be used as identifier for cluster messages * * \param[in] name Actual system name to check * * \return Non-NULL cluster message identifier corresponding to name * * \note The Pacemaker daemons were renamed in version 2.0.0, but the old names * must continue to be used as the identifier for cluster messages, so * that mixed-version clusters are possible during a rolling upgrade. */ const char * pcmk_message_name(const char *name) { if (name == NULL) { return "unknown"; } else if (!strcmp(name, "pacemaker-attrd")) { return "attrd"; } else if (!strcmp(name, "pacemaker-based")) { return CRM_SYSTEM_CIB; } else if (!strcmp(name, "pacemaker-controld")) { return CRM_SYSTEM_CRMD; } else if (!strcmp(name, "pacemaker-execd")) { return CRM_SYSTEM_LRMD; } else if (!strcmp(name, "pacemaker-fenced")) { return "stonith-ng"; } else if (!strcmp(name, "pacemaker-schedulerd")) { return CRM_SYSTEM_PENGINE; } else { return name; } } /*! * \brief Check whether a string represents a cluster daemon name * * \param[in] name String to check * * \return TRUE if name is standard client name used by daemons, FALSE otherwise */ bool crm_is_daemon_name(const char *name) { name = pcmk_message_name(name); return (!strcmp(name, CRM_SYSTEM_CRMD) || !strcmp(name, CRM_SYSTEM_STONITHD) || !strcmp(name, "stonith-ng") || !strcmp(name, "attrd") || !strcmp(name, CRM_SYSTEM_CIB) || !strcmp(name, CRM_SYSTEM_MCP) || !strcmp(name, CRM_SYSTEM_DC) || !strcmp(name, CRM_SYSTEM_TENGINE) || !strcmp(name, CRM_SYSTEM_LRMD)); } #include char * crm_md5sum(const char *buffer) { int lpc = 0, len = 0; char *digest = NULL; unsigned char raw_digest[MD5_DIGEST_SIZE]; if (buffer == NULL) { buffer = ""; } len = strlen(buffer); crm_trace("Beginning digest of %d bytes", len); digest = malloc(2 * MD5_DIGEST_SIZE + 1); if(digest) { md5_buffer(buffer, len, raw_digest); for (lpc = 0; lpc < MD5_DIGEST_SIZE; lpc++) { sprintf(digest + (2 * lpc), "%02x", raw_digest[lpc]); } digest[(2 * MD5_DIGEST_SIZE)] = 0; crm_trace("Digest %s.", digest); } else { crm_err("Could not create digest"); } return digest; } #ifdef HAVE_GNUTLS_GNUTLS_H void crm_gnutls_global_init(void) { signal(SIGPIPE, SIG_IGN); gnutls_global_init(); } #endif /*! * \brief Get the local hostname * * \return Newly allocated string with name, or NULL (and set errno) on error */ char * pcmk_hostname() { struct utsname hostinfo; return (uname(&hostinfo) < 0)? 
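pcmk_message_name() maps the renamed daemons back to their wire identifiers; for example:

pcmk_message_name("pacemaker-based");    /* -> CRM_SYSTEM_CIB    */
pcmk_message_name("pacemaker-fenced");   /* -> "stonith-ng"      */
pcmk_message_name("crm_mon");            /* -> "crm_mon" (as-is) */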
NULL : strdup(hostinfo.nodename); } bool pcmk_str_is_infinity(const char *s) { return crm_str_eq(s, CRM_INFINITY_S, TRUE) || crm_str_eq(s, CRM_PLUS_INFINITY_S, TRUE); } bool pcmk_str_is_minus_infinity(const char *s) { return crm_str_eq(s, CRM_MINUS_INFINITY_S, TRUE); } diff --git a/lib/common/watchdog.c b/lib/common/watchdog.c index 8dadfa13e8..9d8896ba53 100644 --- a/lib/common/watchdog.c +++ b/lib/common/watchdog.c @@ -1,278 +1,272 @@ /* * Copyright 2013-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #ifdef _POSIX_MEMLOCK # include #endif static pid_t sbd_pid = 0; enum pcmk_panic_flags { pcmk_panic_none = 0x00, pcmk_panic_delay = 0x01, pcmk_panic_kdump = 0x02, pcmk_panic_shutdown = 0x04, }; static void sysrq_trigger(char t) { #if SUPPORT_PROCFS FILE *procf; // Root can always write here, regardless of kernel.sysrq value procf = fopen("/proc/sysrq-trigger", "a"); if (!procf) { crm_perror(LOG_WARNING, "Opening sysrq-trigger failed"); return; } crm_info("sysrq-trigger: %c", t); fprintf(procf, "%c\n", t); fclose(procf); #endif // SUPPORT_PROCFS return; } static void pcmk_panic_local(void) { int rc = pcmk_ok; uid_t uid = geteuid(); pid_t ppid = getppid(); if(uid != 0 && ppid > 1) { /* We're a non-root pacemaker daemon (pacemaker-based, * pacemaker-controld, pacemaker-schedulerd, pacemaker-attrd, etc.) with * the original pacemakerd parent. * * Of these, only the controller is likely to be initiating resets. */ - do_crm_log_always(LOG_EMERG, "Signaling parent %lld to panic", - (long long) ppid); + crm_emerg("Signaling parent %lld to panic", (long long) ppid); crm_exit(CRM_EX_PANIC); return; } else if (uid != 0) { #if SUPPORT_PROCFS /* * No permissions, and no pacemakerd parent to escalate to. * Track down the new pacemakerd process and send a signal instead. 
*/ union sigval signal_value; memset(&signal_value, 0, sizeof(signal_value)); ppid = pcmk__procfs_pid_of("pacemakerd"); - do_crm_log_always(LOG_EMERG, "Signaling pacemakerd[%lld] to panic", - (long long) ppid); + crm_emerg("Signaling pacemakerd[%lld] to panic", (long long) ppid); if(ppid > 1 && sigqueue(ppid, SIGQUIT, signal_value) < 0) { crm_perror(LOG_EMERG, "Cannot signal pacemakerd[%lld] to panic", (long long) ppid); } #endif // SUPPORT_PROCFS /* The best we can do now is die */ crm_exit(CRM_EX_PANIC); return; } /* We're either pacemakerd, or a pacemaker daemon running as root */ if (safe_str_eq("crash", getenv("PCMK_panic_action"))) { sysrq_trigger('c'); } else { sysrq_trigger('b'); } /* reboot(RB_HALT_SYSTEM); rc = errno; */ reboot(RB_AUTOBOOT); rc = errno; - do_crm_log_always(LOG_EMERG, "Reboot failed, escalating to parent %lld: %s " - CRM_XS " rc=%d", (long long) ppid, pcmk_rc_str(rc), rc); + crm_emerg("Reboot failed, escalating to parent %lld: %s " CRM_XS " rc=%d", + (long long) ppid, pcmk_rc_str(rc), rc); if(ppid > 1) { /* child daemon */ exit(CRM_EX_PANIC); } else { /* pacemakerd or orphan child */ exit(CRM_EX_FATAL); } } static void pcmk_panic_sbd(void) { union sigval signal_value; pid_t ppid = getppid(); - do_crm_log_always(LOG_EMERG, "Signaling sbd[%lld] to panic", - (long long) sbd_pid); + crm_emerg("Signaling sbd[%lld] to panic", (long long) sbd_pid); memset(&signal_value, 0, sizeof(signal_value)); /* TODO: Arrange for a slightly less brutal option? */ if(sigqueue(sbd_pid, SIGKILL, signal_value) < 0) { crm_perror(LOG_EMERG, "Cannot signal sbd[%lld] to terminate", (long long) sbd_pid); pcmk_panic_local(); } if(ppid > 1) { /* child daemon */ exit(CRM_EX_PANIC); } else { /* pacemakerd or orphan child */ exit(CRM_EX_FATAL); } } void pcmk_panic(const char *origin) { static struct qb_log_callsite *panic_cs = NULL; if (panic_cs == NULL) { panic_cs = qb_log_callsite_get(__func__, __FILE__, "panic-delay", LOG_TRACE, __LINE__, crm_trace_nonlog); } /* Ensure sbd_pid is set */ (void)pcmk_locate_sbd(); if (panic_cs && panic_cs->targets) { /* getppid() == 1 means our original parent no longer exists */ - do_crm_log_always(LOG_EMERG, - "Shutting down instead of panicking the node " - CRM_XS " origin=%s sbd=%lld parent=%d", - origin, (long long) sbd_pid, getppid()); + crm_emerg("Shutting down instead of panicking the node " + CRM_XS " origin=%s sbd=%lld parent=%d", + origin, (long long) sbd_pid, getppid()); crm_exit(CRM_EX_FATAL); return; } if(sbd_pid > 1) { - do_crm_log_always(LOG_EMERG, "Signaling sbd[%lld] to panic the system: %s", - (long long) sbd_pid, origin); + crm_emerg("Signaling sbd[%lld] to panic the system: %s", + (long long) sbd_pid, origin); pcmk_panic_sbd(); } else { - do_crm_log_always(LOG_EMERG, "Panicking the system directly: %s", origin); + crm_emerg("Panicking the system directly: %s", origin); pcmk_panic_local(); } } pid_t pcmk_locate_sbd(void) { char *pidfile = NULL; char *sbd_path = NULL; int rc; if(sbd_pid > 1) { return sbd_pid; } /* Look for the pid file */ pidfile = crm_strdup_printf(PCMK_RUN_DIR "/sbd.pid"); sbd_path = crm_strdup_printf("%s/sbd", SBIN_DIR); /* Read the pid file */ rc = pcmk__pidfile_matches(pidfile, 0, sbd_path, &sbd_pid); if (rc == pcmk_rc_ok) { crm_trace("SBD detected at pid %lld (via PID file %s)", (long long) sbd_pid, pidfile); #if SUPPORT_PROCFS } else { /* Fall back to /proc for systems that support it */ sbd_pid = pcmk__procfs_pid_of("sbd"); crm_trace("SBD detected at pid %lld (via procfs)", (long long) sbd_pid); #endif // SUPPORT_PROCFS } 
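    /* Either lookup above can leave sbd_pid unset or negative when no SBD
     * process is found; normalize that to 0 below so callers only need to
     * check sbd_pid > 1 to know a live SBD daemon was located.
     */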
if(sbd_pid < 0) { sbd_pid = 0; crm_trace("SBD not detected"); } free(pidfile); free(sbd_path); return sbd_pid; } long pcmk__get_sbd_timeout(void) { static long sbd_timeout = -2; if (sbd_timeout == -2) { sbd_timeout = crm_get_msec(getenv("SBD_WATCHDOG_TIMEOUT")); } return sbd_timeout; } long pcmk__auto_watchdog_timeout() { long sbd_timeout = pcmk__get_sbd_timeout(); return (sbd_timeout <= 0)? 0 : (2 * sbd_timeout); } bool pcmk__valid_sbd_timeout(const char *value) { long st_timeout = value? crm_get_msec(value) : 0; if (st_timeout < 0) { st_timeout = pcmk__auto_watchdog_timeout(); crm_debug("Using calculated value %ld for stonith-watchdog-timeout (%s)", st_timeout, value); } if (st_timeout == 0) { crm_debug("Watchdog may be enabled but stonith-watchdog-timeout is disabled (%s)", value? value : "default"); } else if (pcmk_locate_sbd() == 0) { - do_crm_log_always(LOG_EMERG, - "Shutting down: stonith-watchdog-timeout configured (%s) but SBD not active", - (value? value : "auto")); + crm_emerg("Shutting down: stonith-watchdog-timeout configured (%s) " + "but SBD not active", (value? value : "auto")); crm_exit(CRM_EX_FATAL); return false; } else { long sbd_timeout = pcmk__get_sbd_timeout(); if (st_timeout < sbd_timeout) { - do_crm_log_always(LOG_EMERG, - "Shutting down: stonith-watchdog-timeout (%s) too short (must be >%ldms)", - value, sbd_timeout); + crm_emerg("Shutting down: stonith-watchdog-timeout (%s) too short " + "(must be >%ldms)", value, sbd_timeout); crm_exit(CRM_EX_FATAL); return false; } crm_info("Watchdog configured with stonith-watchdog-timeout %s and SBD timeout %ldms", value, sbd_timeout); } return true; } diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c index d76b956bf4..0806c1333a 100644 --- a/lib/fencing/st_client.c +++ b/lib/fencing/st_client.c @@ -1,2619 +1,2619 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(stonith); struct stonith_action_s { /*! user defined data */ char *agent; char *action; char *victim; GHashTable *args; int timeout; int async; void *userdata; void (*done_cb) (GPid pid, gint status, const char *output, gpointer user_data); void (*fork_cb) (GPid pid, gpointer user_data); svc_action_t *svc_action; /*! 
internal timing information */ time_t initial_start_time; int tries; int remaining_timeout; int max_retries; /* device output data */ GPid pid; int rc; char *output; char *error; }; typedef struct stonith_private_s { char *token; crm_ipc_t *ipc; mainloop_io_t *source; GHashTable *stonith_op_callback_table; GList *notify_list; int notify_refcnt; bool notify_deletes; void (*op_callback) (stonith_t * st, stonith_callback_data_t * data); } stonith_private_t; typedef struct stonith_notify_client_s { const char *event; const char *obj_id; /* implement one day */ const char *obj_type; /* implement one day */ void (*notify) (stonith_t * st, stonith_event_t * e); bool delete; } stonith_notify_client_t; typedef struct stonith_callback_client_s { void (*callback) (stonith_t * st, stonith_callback_data_t * data); const char *id; void *user_data; gboolean only_success; gboolean allow_timeout_updates; struct timer_rec_s *timer; } stonith_callback_client_t; struct notify_blob_s { stonith_t *stonith; xmlNode *xml; }; struct timer_rec_s { int call_id; int timeout; guint ref; stonith_t *stonith; }; typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *, xmlNode *, xmlNode *, xmlNode **, xmlNode **); bool stonith_dispatch(stonith_t * st); xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); static int stonith_send_command(stonith_t *stonith, const char *op, xmlNode *data, xmlNode **output_data, int call_options, int timeout); static void stonith_connection_destroy(gpointer user_data); static void stonith_send_notification(gpointer data, gpointer user_data); static int internal_stonith_action_execute(stonith_action_t * action); static void log_action(stonith_action_t *action, pid_t pid); /*! * \brief Get agent namespace by name * * \param[in] namespace_s Name of namespace as string * * \return Namespace as enum value */ enum stonith_namespace stonith_text2namespace(const char *namespace_s) { if ((namespace_s == NULL) || !strcmp(namespace_s, "any")) { return st_namespace_any; } else if (!strcmp(namespace_s, "redhat") || !strcmp(namespace_s, "stonith-ng")) { return st_namespace_rhcs; } else if (!strcmp(namespace_s, "internal")) { return st_namespace_internal; } else if (!strcmp(namespace_s, "heartbeat")) { return st_namespace_lha; } return st_namespace_invalid; } /*! * \brief Get agent namespace name * * \param[in] namespace Namespace as enum value * * \return Namespace name as string */ const char * stonith_namespace2text(enum stonith_namespace st_namespace) { switch (st_namespace) { case st_namespace_any: return "any"; case st_namespace_rhcs: return "stonith-ng"; case st_namespace_internal: return "internal"; case st_namespace_lha: return "heartbeat"; default: break; } return "unsupported"; } /*! 
* \brief Determine namespace of a fence agent * * \param[in] agent Fence agent type * \param[in] namespace_s Name of agent namespace as string, if known * * \return Namespace of specified agent, as enum value */ enum stonith_namespace stonith_get_namespace(const char *agent, const char *namespace_s) { if (safe_str_eq(namespace_s, "internal")) { return st_namespace_internal; } if (stonith__agent_is_rhcs(agent)) { return st_namespace_rhcs; } #if HAVE_STONITH_STONITH_H if (stonith__agent_is_lha(agent)) { return st_namespace_lha; } #endif crm_err("Unknown fence agent: %s", agent); return st_namespace_invalid; } static void log_action(stonith_action_t *action, pid_t pid) { if (action->output) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = crm_strdup_printf("%s[%d] stdout:", action->agent, pid); crm_log_output(LOG_TRACE, prefix, action->output); free(prefix); } if (action->error) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = crm_strdup_printf("%s[%d] stderr:", action->agent, pid); crm_log_output(LOG_WARNING, prefix, action->error); free(prefix); } } /* when cycling through the list we don't want to delete items so just mark them and when we know nobody is using the list loop over it to remove the marked items */ static void foreach_notify_entry (stonith_private_t *private, GFunc func, gpointer user_data) { private->notify_refcnt++; g_list_foreach(private->notify_list, func, user_data); private->notify_refcnt--; if ((private->notify_refcnt == 0) && private->notify_deletes) { GList *list_item = private->notify_list; private->notify_deletes = FALSE; while (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; GList *next = g_list_next(list_item); if (list_client->delete) { free(list_client); private->notify_list = g_list_delete_link(private->notify_list, list_item); } list_item = next; } } } static void stonith_connection_destroy(gpointer user_data) { stonith_t *stonith = user_data; stonith_private_t *native = NULL; struct notify_blob_s blob; crm_trace("Sending destroyed notification"); blob.stonith = stonith; blob.xml = create_xml_node(NULL, "notify"); native = stonith->st_private; native->ipc = NULL; native->source = NULL; free(native->token); native->token = NULL; stonith->state = stonith_disconnected; crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT); foreach_notify_entry(native, stonith_send_notification, &blob); free_xml(blob.xml); } xmlNode * create_device_registration_xml(const char *id, enum stonith_namespace namespace, const char *agent, stonith_key_value_t *params, const char *rsc_provides) { xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE); xmlNode *args = create_xml_node(data, XML_TAG_ATTRS); #if HAVE_STONITH_STONITH_H if (namespace == st_namespace_any) { namespace = stonith_get_namespace(agent, NULL); } if (namespace == st_namespace_lha) { hash2field((gpointer) "plugin", (gpointer) agent, args); agent = "fence_legacy"; } #endif crm_xml_add(data, XML_ATTR_ID, id); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); crm_xml_add(data, "agent", agent); if ((namespace != st_namespace_any) && (namespace != st_namespace_invalid)) { crm_xml_add(data, "namespace", stonith_namespace2text(namespace)); } if (rsc_provides) { crm_xml_add(data, "rsc_provides", rsc_provides); } for (; params; params = params->next) { hash2field((gpointer) params->key, (gpointer) params->value, args); } return data; } static int 
stonith_api_register_device(stonith_t * st, int call_options, const char *id, const char *namespace, const char *agent, stonith_key_value_t * params) { int rc = 0; xmlNode *data = NULL; data = create_device_registration_xml(id, stonith_text2namespace(namespace), agent, params, NULL); rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_device(stonith_t * st, int call_options, const char *name) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); crm_xml_add(data, XML_ATTR_ID, name); rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_level_full(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level) { int rc = 0; xmlNode *data = NULL; CRM_CHECK(node || pattern || (attr && value), return -EINVAL); data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); if (node) { crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); } else if (pattern) { crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern); } else { crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr); crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value); } crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0); free_xml(data); return rc; } static int stonith_api_remove_level(stonith_t * st, int options, const char *node, int level) { return stonith_api_remove_level_full(st, options, node, NULL, NULL, NULL, level); } /*! * \internal * \brief Create XML for fence topology level registration request * * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to register * \param[in] device_list List of devices in level * * \return Newly allocated XML tree on success, NULL otherwise * * \note The caller should set only one of node, pattern or attr/value. 
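 *
 * \note As an illustration only: registering level 2 for node "node1" with
 *       devices "d1" and "d2" produces XML roughly like
 *       <fencing-level id="2" index="2" target="node1" devices="d1,d2"/>
 *       (an origin attribute identifying the requesting function is added
 *       as well).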
*/ xmlNode * create_level_registration_xml(const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list) { int len = 0; char *list = NULL; xmlNode *data; CRM_CHECK(node || pattern || (attr && value), return NULL); data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); CRM_CHECK(data, return NULL); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); crm_xml_add_int(data, XML_ATTR_ID, level); crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); if (node) { crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); } else if (pattern) { crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern); } else { crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr); crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value); } for (; device_list; device_list = device_list->next) { int adding = strlen(device_list->value); if(list) { adding++; /* +1 space */ } crm_trace("Adding %s (%dc) at offset %d", device_list->value, adding, len); list = realloc_safe(list, len + adding + 1); /* +1 EOS */ if (list == NULL) { crm_perror(LOG_CRIT, "Could not create device list"); free_xml(data); return NULL; } sprintf(list + len, "%s%s", len?",":"", device_list->value); len += adding; } crm_xml_add(data, XML_ATTR_STONITH_DEVICES, list); free(list); return data; } static int stonith_api_register_level_full(stonith_t * st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list) { int rc = 0; xmlNode *data = create_level_registration_xml(node, pattern, attr, value, level, device_list); CRM_CHECK(data != NULL, return -EINVAL); rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0); free_xml(data); return rc; } static int stonith_api_register_level(stonith_t * st, int options, const char *node, int level, stonith_key_value_t * device_list) { return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL, level, device_list); } static void append_arg(const char *key, const char *value, GHashTable **args) { CRM_CHECK(key != NULL, return); CRM_CHECK(value != NULL, return); CRM_CHECK(args != NULL, return); if (strstr(key, "pcmk_")) { return; } else if (strstr(key, CRM_META)) { return; } else if (safe_str_eq(key, "crm_feature_set")) { return; } if (!*args) { *args = crm_str_table_new(); } CRM_CHECK(*args != NULL, return); crm_trace("Appending: %s=%s", key, value); g_hash_table_replace(*args, strdup(key), strdup(value)); } static void append_config_arg(gpointer key, gpointer value, gpointer user_data) { /* The fencer will filter action out when it registers the device, * but ignore it here just in case any other library callers * fail to do so. 
*/ if (safe_str_neq(key, STONITH_ATTR_ACTION_OP)) { append_arg(key, value, user_data); return; } } static GHashTable * make_args(const char *agent, const char *action, const char *victim, uint32_t victim_nodeid, GHashTable * device_args, GHashTable * port_map) { char buffer[512]; GHashTable *arg_list = NULL; const char *value = NULL; CRM_CHECK(action != NULL, return NULL); snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action); if (device_args) { value = g_hash_table_lookup(device_args, buffer); } if (value) { crm_debug("Substituting action '%s' for requested operation '%s'", value, action); action = value; } append_arg(STONITH_ATTR_ACTION_OP, action, &arg_list); if (victim && device_args) { const char *alias = victim; const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG); if (port_map && g_hash_table_lookup(port_map, victim)) { alias = g_hash_table_lookup(port_map, victim); } /* Always supply the node's name, too: * https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md */ append_arg("nodename", victim, &arg_list); if (victim_nodeid) { char nodeid_str[33] = { 0, }; if (snprintf(nodeid_str, 33, "%u", (unsigned int)victim_nodeid)) { crm_info("For stonith action (%s) for victim %s, adding nodeid (%s) to parameters", action, victim, nodeid_str); append_arg("nodeid", nodeid_str, &arg_list); } } /* Check if we need to supply the victim in any other form */ if(safe_str_eq(agent, "fence_legacy")) { value = agent; } else if (param == NULL) { param = "port"; value = g_hash_table_lookup(device_args, param); } else if (safe_str_eq(param, "none")) { value = param; /* Nothing more to do */ } else { value = g_hash_table_lookup(device_args, param); } /* Don't overwrite explictly set values for $param */ if (value == NULL || safe_str_eq(value, "dynamic")) { crm_debug("Performing '%s' action targeting '%s' as '%s=%s'", action, victim, param, alias); append_arg(param, alias, &arg_list); } } if (device_args) { g_hash_table_foreach(device_args, append_config_arg, &arg_list); } return arg_list; } /*! * \internal * \brief Free all memory used by a stonith action * * \param[in,out] action Action to free */ void stonith__destroy_action(stonith_action_t *action) { if (action) { free(action->agent); if (action->args) { g_hash_table_destroy(action->args); } free(action->action); free(action->victim); if (action->svc_action) { services_action_free(action->svc_action); } free(action->output); free(action->error); free(action); } } /*! * \internal * \brief Get the result of an executed stonith action * * \param[in,out] action Executed action * \param[out] rc Where to store result code (or NULL) * \param[out] output Where to store standard output (or NULL) * \param[out] error_output Where to store standard error output (or NULL) * * \note If output or error_output is not NULL, the caller is responsible for * freeing the memory. 
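 *
 * \note Typical use (sketch): after executing the action, call
 *       stonith__action_result(action, &rc, &output, &error_output), then
 *       free() both strings when finished. The strings are detached from the
 *       action here, so stonith__destroy_action() will not free them again.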
*/ void stonith__action_result(stonith_action_t *action, int *rc, char **output, char **error_output) { if (rc) { *rc = pcmk_ok; } if (output) { *output = NULL; } if (error_output) { *error_output = NULL; } if (action != NULL) { if (rc) { *rc = action->rc; } if (output && action->output) { *output = action->output; action->output = NULL; // hand off memory management to caller } if (error_output && action->error) { *error_output = action->error; action->error = NULL; // hand off memory management to caller } } } #define FAILURE_MAX_RETRIES 2 stonith_action_t * stonith_action_create(const char *agent, const char *_action, const char *victim, uint32_t victim_nodeid, int timeout, GHashTable * device_args, GHashTable * port_map) { stonith_action_t *action; action = calloc(1, sizeof(stonith_action_t)); action->args = make_args(agent, _action, victim, victim_nodeid, device_args, port_map); crm_debug("Preparing '%s' action for %s using agent %s", _action, (victim? victim : "no target"), agent); action->agent = strdup(agent); action->action = strdup(_action); if (victim) { action->victim = strdup(victim); } action->timeout = action->remaining_timeout = timeout; action->max_retries = FAILURE_MAX_RETRIES; if (device_args) { char buffer[512]; const char *value = NULL; snprintf(buffer, sizeof(buffer), "pcmk_%s_retries", _action); value = g_hash_table_lookup(device_args, buffer); if (value) { action->max_retries = atoi(value); } } return action; } static gboolean update_remaining_timeout(stonith_action_t * action) { int diff = time(NULL) - action->initial_start_time; if (action->tries >= action->max_retries) { crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed", action->agent, action->action, action->max_retries); action->remaining_timeout = 0; } else if ((action->rc != -ETIME) && diff < (action->timeout * 0.7)) { /* only set remaining timeout period if there is 30% * or greater of the original timeout period left */ action->remaining_timeout = action->timeout - diff; } else { action->remaining_timeout = 0; } return action->remaining_timeout ? TRUE : FALSE; } static int svc_action_to_errno(svc_action_t *svc_action) { int rv = pcmk_ok; if (svc_action->rc > 0) { /* Try to provide a useful error code based on the fence agent's * error output. 
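         * Roughly: an OCF timeout result or a "timed out" message on stderr
         * maps to -ETIME, empty stderr to -ENODATA, "Unrecognised action"
         * to -EOPNOTSUPP, and anything else to -pcmk_err_generic.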
*/ if (svc_action->rc == PCMK_OCF_TIMEOUT) { rv = -ETIME; } else if (svc_action->stderr_data == NULL) { rv = -ENODATA; } else if (strstr(svc_action->stderr_data, "imed out")) { /* Some agents have their own internal timeouts */ rv = -ETIME; } else if (strstr(svc_action->stderr_data, "Unrecognised action")) { rv = -EOPNOTSUPP; } else { rv = -pcmk_err_generic; } } return rv; } static void stonith_action_async_done(svc_action_t *svc_action) { stonith_action_t *action = (stonith_action_t *) svc_action->cb_data; action->rc = svc_action_to_errno(svc_action); action->output = svc_action->stdout_data; svc_action->stdout_data = NULL; action->error = svc_action->stderr_data; svc_action->stderr_data = NULL; svc_action->params = NULL; crm_debug("Child process %d performing action '%s' exited with rc %d", action->pid, action->action, svc_action->rc); log_action(action, action->pid); if (action->rc != pcmk_ok && update_remaining_timeout(action)) { int rc = internal_stonith_action_execute(action); if (rc == pcmk_ok) { return; } } if (action->done_cb) { action->done_cb(action->pid, action->rc, action->output, action->userdata); } action->svc_action = NULL; // don't remove our caller stonith__destroy_action(action); } static void stonith_action_async_forked(svc_action_t *svc_action) { stonith_action_t *action = (stonith_action_t *) svc_action->cb_data; action->pid = svc_action->pid; action->svc_action = svc_action; if (action->fork_cb) { (action->fork_cb) (svc_action->pid, action->userdata); } crm_trace("Child process %d performing action '%s' successfully forked", action->pid, action->action); } static int internal_stonith_action_execute(stonith_action_t * action) { int rc = -EPROTO; int is_retry = 0; svc_action_t *svc_action = NULL; static int stonith_sequence = 0; char *buffer = NULL; if (!action->tries) { action->initial_start_time = time(NULL); } action->tries++; if (action->tries > 1) { crm_info("Attempt %d to execute %s (%s). remaining timeout is %d", action->tries, action->agent, action->action, action->remaining_timeout); is_retry = 1; } if (action->args == NULL || action->agent == NULL) goto fail; buffer = crm_strdup_printf(RH_STONITH_DIR "/%s", basename(action->agent)); svc_action = services_action_create_generic(buffer, NULL); free(buffer); svc_action->timeout = 1000 * action->remaining_timeout; svc_action->standard = strdup(PCMK_RESOURCE_CLASS_STONITH); svc_action->id = crm_strdup_printf("%s_%s_%d", basename(action->agent), action->action, action->tries); svc_action->agent = strdup(action->agent); svc_action->sequence = stonith_sequence++; svc_action->params = action->args; svc_action->cb_data = (void *) action; set_bit(svc_action->flags, SVC_ACTION_NON_BLOCKED); /* keep retries from executing out of control and free previous results */ if (is_retry) { free(action->output); action->output = NULL; free(action->error); action->error = NULL; sleep(1); } if (action->async) { /* async */ if(services_action_async_fork_notify(svc_action, &stonith_action_async_done, &stonith_action_async_forked) == FALSE) { services_action_free(svc_action); svc_action = NULL; } else { rc = 0; } } else { /* sync */ if (services_action_sync(svc_action)) { rc = 0; action->rc = svc_action_to_errno(svc_action); action->output = svc_action->stdout_data; svc_action->stdout_data = NULL; action->error = svc_action->stderr_data; svc_action->stderr_data = NULL; } else { action->rc = -ECONNABORTED; rc = action->rc; } svc_action->params = NULL; services_action_free(svc_action); } fail: return rc; } /*! 
* \internal * \brief Kick off execution of an async stonith action * * \param[in,out] action Action to be executed * \param[in,out] userdata Datapointer to be passed to callbacks * \param[in] done Callback to notify action has failed/succeeded * \param[in] fork_callback Callback to notify successful fork of child * * \return pcmk_ok if ownership of action has been taken, -errno otherwise */ int stonith_action_execute_async(stonith_action_t * action, void *userdata, void (*done) (GPid pid, int rc, const char *output, gpointer user_data), void (*fork_cb) (GPid pid, gpointer user_data)) { if (!action) { return -EINVAL; } action->userdata = userdata; action->done_cb = done; action->fork_cb = fork_cb; action->async = 1; return internal_stonith_action_execute(action); } /*! * \internal * \brief Execute a stonith action * * \param[in,out] action Action to execute * * \return pcmk_ok on success, -errno otherwise */ int stonith__execute(stonith_action_t *action) { int rc = pcmk_ok; CRM_CHECK(action != NULL, return -EINVAL); // Keep trying until success, max retries, or timeout do { rc = internal_stonith_action_execute(action); } while ((rc != pcmk_ok) && update_remaining_timeout(action)); return rc; } static int stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace, stonith_key_value_t ** devices, int timeout) { int count = 0; enum stonith_namespace ns = stonith_text2namespace(namespace); if (devices == NULL) { crm_err("Parameter error: stonith_api_device_list"); return -EFAULT; } #if HAVE_STONITH_STONITH_H // Include Linux-HA agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_lha)) { count += stonith__list_lha_agents(devices); } #endif // Include Red Hat agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) { count += stonith__list_rhcs_agents(devices); } return count; } static int stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent, const char *namespace, char **output, int timeout) { /* By executing meta-data directly, we can get it from stonith_admin when * the cluster is not running, which is important for higher-level tools. 
*/ enum stonith_namespace ns = stonith_get_namespace(agent, namespace); crm_trace("Looking up metadata for %s agent %s", stonith_namespace2text(ns), agent); switch (ns) { case st_namespace_rhcs: return stonith__rhcs_metadata(agent, timeout, output); #if HAVE_STONITH_STONITH_H case st_namespace_lha: return stonith__lha_metadata(agent, timeout, output); #endif default: crm_err("Can't get fence agent '%s' meta-data: No such agent", agent); break; } return -ENODEV; } static int stonith_api_query(stonith_t * stonith, int call_options, const char *target, stonith_key_value_t ** devices, int timeout) { int rc = 0, lpc = 0, max = 0; xmlNode *data = NULL; xmlNode *output = NULL; xmlXPathObjectPtr xpathObj = NULL; CRM_CHECK(devices != NULL, return -EINVAL); data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, target); crm_xml_add(data, F_STONITH_ACTION, "off"); rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout); if (rc < 0) { return rc; } xpathObj = xpath_search(output, "//@agent"); if (xpathObj) { max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match != NULL) { xmlChar *match_path = xmlGetNodePath(match); crm_info("%s[%d] = %s", "//@agent", lpc, match_path); free(match_path); *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID)); } } freeXpathObject(xpathObj); } free_xml(output); free_xml(data); return max; } static int stonith_api_call(stonith_t * stonith, int call_options, const char *id, const char *action, const char *victim, int timeout, xmlNode ** output) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__); crm_xml_add(data, F_STONITH_DEVICE, id); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add(data, F_STONITH_TARGET, victim); rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout); free_xml(data); return rc; } static int stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info, int timeout) { int rc; xmlNode *output = NULL; rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output); if (output && list_info) { const char *list_str; list_str = crm_element_value(output, "st_output"); if (list_str) { *list_info = strdup(list_str); } } if (output) { free_xml(output); } return rc; } static int stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout) { return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL); } static int stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port, int timeout) { return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL); } static int stonith_api_fence_with_delay(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance, int delay) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout); crm_xml_add_int(data, F_STONITH_TOLERANCE, tolerance); if (delay >= 0) { crm_xml_add_int(data, F_STONITH_DELAY, delay); } rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout); free_xml(data); return rc; } 
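/* Illustrative sketch only, not part of this patch: one way a client could
 * use the API above to request fencing synchronously. It assumes the public
 * <crm/stonith-ng.h> header and a reachable fencer, and abbreviates error
 * handling; the client name and timeout values are placeholders.
 */
#include <errno.h>
#include <crm/stonith-ng.h>

static int
fence_node_example(const char *node)
{
    stonith_t *st = stonith_api_new();
    int rc = -ENOMEM;

    if (st != NULL) {
        rc = st->cmds->connect(st, "example-client", NULL);
        if (rc == pcmk_ok) {
            /* Reboot the target with a 120s timeout, no tolerance window,
             * and no requested delay (-1)
             */
            rc = st->cmds->fence_with_delay(st, st_opt_sync_call, node,
                                            "reboot", 120, 0, -1);
            st->cmds->disconnect(st);
        }
        stonith_api_delete(st);
    }
    return rc;
}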
static int stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance) { return stonith_api_fence_with_delay(stonith, call_options, node, action, timeout, tolerance, -1); } static int stonith_api_confirm(stonith_t * stonith, int call_options, const char *target) { return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0, 0); } static int stonith_api_history(stonith_t * stonith, int call_options, const char *node, stonith_history_t ** history, int timeout) { int rc = 0; xmlNode *data = NULL; xmlNode *output = NULL; stonith_history_t *last = NULL; *history = NULL; if (node) { data = create_xml_node(NULL, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); } rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output, call_options | st_opt_sync_call, timeout); free_xml(data); if (rc == 0) { xmlNode *op = NULL; xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_NEVER); for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) { stonith_history_t *kvp; - int completed; + long long completed; kvp = calloc(1, sizeof(stonith_history_t)); kvp->target = crm_element_value_copy(op, F_STONITH_TARGET); kvp->action = crm_element_value_copy(op, F_STONITH_ACTION); kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN); kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE); kvp->client = crm_element_value_copy(op, F_STONITH_CLIENTNAME); - crm_element_value_int(op, F_STONITH_DATE, &completed); + crm_element_value_ll(op, F_STONITH_DATE, &completed); kvp->completed = (time_t) completed; crm_element_value_int(op, F_STONITH_STATE, &kvp->state); if (last) { last->next = kvp; } else { *history = kvp; } last = kvp; } } free_xml(output); return rc; } void stonith_history_free(stonith_history_t *history) { stonith_history_t *hp, *hp_old; for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) { free(hp->target); free(hp->action); free(hp->origin); free(hp->delegate); free(hp->client); } } static gint stonithlib_GCompareFunc(gconstpointer a, gconstpointer b) { int rc = 0; const stonith_notify_client_t *a_client = a; const stonith_notify_client_t *b_client = b; if (a_client->delete || b_client->delete) { /* make entries marked for deletion not findable */ return -1; } CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0); rc = strcmp(a_client->event, b_client->event); if (rc == 0) { if (a_client->notify == NULL || b_client->notify == NULL) { return 0; } else if (a_client->notify == b_client->notify) { return 0; } else if (((long)a_client->notify) < ((long)b_client->notify)) { crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return -1; } crm_err("callbacks for %s are not equal: %p vs. 
%p", a_client->event, a_client->notify, b_client->notify); return 1; } return rc; } xmlNode * stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options) { xmlNode *op_msg = create_xml_node(NULL, "stonith_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command"); crm_xml_add(op_msg, F_TYPE, T_STONITH_NG); crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_STONITH_OPERATION, op); crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options); if (data != NULL) { add_message_xml(op_msg, F_STONITH_CALLDATA, data); } return op_msg; } static void stonith_destroy_op_callback(gpointer data) { stonith_callback_client_t *blob = data; if (blob->timer && blob->timer->ref > 0) { g_source_remove(blob->timer->ref); } free(blob->timer); free(blob); } static int stonith_api_signoff(stonith_t * stonith) { stonith_private_t *native = stonith->st_private; crm_debug("Disconnecting from the fencer"); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } free(native->token); native->token = NULL; stonith->state = stonith_disconnected; return pcmk_ok; } static int stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks) { stonith_private_t *private = stonith->st_private; if (all_callbacks) { private->op_callback = NULL; g_hash_table_destroy(private->stonith_op_callback_table); private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); } else if (call_id == 0) { private->op_callback = NULL; } else { g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); } return pcmk_ok; } static void invoke_callback(stonith_t * st, int call_id, int rc, void *userdata, void (*callback) (stonith_t * st, stonith_callback_data_t * data)) { stonith_callback_data_t data = { 0, }; data.call_id = call_id; data.rc = rc; data.userdata = userdata; callback(st, &data); } static void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc) { stonith_private_t *private = NULL; stonith_callback_client_t *blob = NULL; stonith_callback_client_t local_blob; CRM_CHECK(stonith != NULL, return); CRM_CHECK(stonith->st_private != NULL, return); private = stonith->st_private; local_blob.id = NULL; local_blob.callback = NULL; local_blob.user_data = NULL; local_blob.only_success = FALSE; if (msg != NULL) { crm_element_value_int(msg, F_STONITH_RC, &rc); crm_element_value_int(msg, F_STONITH_CALLID, &call_id); } CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result")); blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (blob != NULL) { local_blob = *blob; blob = NULL; stonith_api_del_callback(stonith, call_id, FALSE); } else { crm_trace("No callback found for call %d", call_id); local_blob.callback = NULL; } if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) { crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id); invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback); } else if 
(private->op_callback == NULL && rc != pcmk_ok) { crm_warn("Fencing command failed: %s", pcmk_strerror(rc)); crm_log_xml_debug(msg, "Failed fence update"); } if (private->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); invoke_callback(stonith, call_id, rc, NULL, private->op_callback); } crm_trace("OP callback activated."); } static gboolean stonith_async_timeout_handler(gpointer data) { struct timer_rec_s *timer = data; crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout); stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME); /* Always return TRUE, never remove the handler * We do that in stonith_del_callback() */ return TRUE; } static void set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id, int timeout) { struct timer_rec_s *async_timer = callback->timer; if (timeout <= 0) { return; } if (!async_timer) { async_timer = calloc(1, sizeof(struct timer_rec_s)); callback->timer = async_timer; } async_timer->stonith = stonith; async_timer->call_id = call_id; /* Allow a fair bit of grace to allow the server to tell us of a timeout * This is only a fallback */ async_timer->timeout = (timeout + 60) * 1000; if (async_timer->ref) { g_source_remove(async_timer->ref); } async_timer->ref = g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer); } static void update_callback_timeout(int call_id, int timeout, stonith_t * st) { stonith_callback_client_t *callback = NULL; stonith_private_t *private = st->st_private; callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (!callback || !callback->allow_timeout_updates) { return; } set_callback_timeout(callback, st, call_id, timeout); } static int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; struct notify_blob_s blob; stonith_t *st = userdata; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->st_private; blob.stonith = st; blob.xml = string2xml(buffer); if (blob.xml == NULL) { crm_warn("Received malformed message from fencer: %s", buffer); return 0; } /* do callbacks */ type = crm_element_value(blob.xml, F_TYPE); crm_trace("Activating %s callbacks...", type); if (safe_str_eq(type, T_STONITH_NG)) { stonith_perform_callback(st, blob.xml, 0, 0); } else if (safe_str_eq(type, T_STONITH_NOTIFY)) { foreach_notify_entry(private, stonith_send_notification, &blob); } else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) { int call_id = 0; int timeout = 0; crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout); crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id); update_callback_timeout(call_id, timeout, st); } else { crm_err("Unknown message type: %s", type); crm_log_xml_warn(blob.xml, "BadReply"); } free_xml(blob.xml); return 1; } static int stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd) { int rc = pcmk_ok; stonith_private_t *native = NULL; const char *display_name = name? name : "client"; struct ipc_client_callbacks st_callbacks = { .dispatch = stonith_dispatch_internal, .destroy = stonith_connection_destroy }; CRM_CHECK(stonith != NULL, return -EINVAL); native = stonith->st_private; CRM_ASSERT(native != NULL); crm_debug("Attempting fencer connection by %s with%s mainloop", display_name, (stonith_fd? 
"out" : "")); stonith->state = stonith_connected_command; if (stonith_fd) { /* No mainloop */ native->ipc = crm_ipc_new("stonith-ng", 0); if (native->ipc && crm_ipc_connect(native->ipc)) { *stonith_fd = crm_ipc_get_fd(native->ipc); } else if (native->ipc) { crm_ipc_close(native->ipc); crm_ipc_destroy(native->ipc); native->ipc = NULL; } } else { /* With mainloop */ native->source = mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { rc = -ENOTCONN; } else { xmlNode *reply = NULL; xmlNode *hello = create_xml_node(NULL, "stonith_command"); crm_xml_add(hello, F_TYPE, T_STONITH_NG); crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(hello, F_STONITH_CLIENTNAME, name); rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply); if (rc < 0) { crm_debug("Couldn't register with the fencer: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); rc = -ECOMM; } else if (reply == NULL) { crm_debug("Couldn't register with the fencer: no reply"); rc = -EPROTO; } else { const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION); native->token = crm_element_value_copy(reply, F_STONITH_CLIENTID); if (safe_str_neq(msg_type, CRM_OP_REGISTER)) { crm_debug("Couldn't register with the fencer: invalid reply type '%s'", (msg_type? msg_type : "(missing)")); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else if (native->token == NULL) { crm_debug("Couldn't register with the fencer: no token in reply"); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else { #if HAVE_MSGFROMIPC_TIMEOUT stonith->call_timeout = MAX_IPC_DELAY; #endif crm_debug("Connection to fencer by %s succeeded (registration token: %s)", display_name, native->token); rc = pcmk_ok; } } free_xml(reply); free_xml(hello); } if (rc != pcmk_ok) { crm_debug("Connection attempt to fencer by %s failed: %s " CRM_XS " rc=%d", display_name, pcmk_strerror(rc), rc); stonith->cmds->disconnect(stonith); } return rc; } static int stonith_set_notification(stonith_t * stonith, const char *callback, int enabled) { int rc = pcmk_ok; xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__); stonith_private_t *native = stonith->st_private; if (stonith->state != stonith_disconnected) { crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY); if (enabled) { crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback); } else { crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback); } rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL); if (rc < 0) { crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc); rc = -ECOMM; } else { rc = pcmk_ok; } } free_xml(notify_msg); return rc; } static int stonith_api_add_notification(stonith_t * stonith, const char *event, void (*callback) (stonith_t * stonith, stonith_event_t * e)) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; private = stonith->st_private; crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list)); new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = callback; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); if (list_item != NULL) { crm_warn("Callback already present"); free(new_client); return -ENOTUNIQ; } else { private->notify_list = g_list_append(private->notify_list, new_client); 
stonith_set_notification(stonith, event, 1); crm_trace("Callback added (%d)", g_list_length(private->notify_list)); } return pcmk_ok; } static int stonith_api_del_notification(stonith_t * stonith, const char *event) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; crm_debug("Removing callback for %s events", event); private = stonith->st_private; new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = NULL; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); stonith_set_notification(stonith, event, 0); if (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; if (private->notify_refcnt) { list_client->delete = TRUE; private->notify_deletes = TRUE; } else { private->notify_list = g_list_remove(private->notify_list, list_client); free(list_client); } crm_trace("Removed callback"); } else { crm_trace("Callback not present"); } free(new_client); return pcmk_ok; } static int stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options, void *user_data, const char *callback_name, void (*callback) (stonith_t * st, stonith_callback_data_t * data)) { stonith_callback_client_t *blob = NULL; stonith_private_t *private = NULL; CRM_CHECK(stonith != NULL, return -EINVAL); CRM_CHECK(stonith->st_private != NULL, return -EINVAL); private = stonith->st_private; if (call_id == 0) { private->op_callback = callback; } else if (call_id < 0) { if (!(options & st_opt_report_only_success)) { crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id)); invoke_callback(stonith, call_id, call_id, user_data, callback); } else { crm_warn("Fencer call failed: %s", pcmk_strerror(call_id)); } return FALSE; } blob = calloc(1, sizeof(stonith_callback_client_t)); blob->id = callback_name; blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE; blob->user_data = user_data; blob->callback = callback; blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? 
TRUE : FALSE; if (timeout > 0) { set_callback_timeout(blob, stonith, call_id, timeout); } g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob); crm_trace("Added callback to %s for call %d", callback_name, call_id); return TRUE; } static void stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data) { int call = GPOINTER_TO_INT(key); stonith_callback_client_t *blob = value; crm_debug("Call %d (%s): pending", call, crm_str(blob->id)); } void stonith_dump_pending_callbacks(stonith_t * stonith) { stonith_private_t *private = stonith->st_private; if (private->stonith_op_callback_table == NULL) { return; } return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL); } /* */ static stonith_event_t * xml_to_event(xmlNode * msg) { stonith_event_t *event = calloc(1, sizeof(stonith_event_t)); const char *ntype = crm_element_value(msg, F_SUBTYPE); char *data_addr = crm_strdup_printf("//%s", ntype); xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG); crm_log_xml_trace(msg, "stonith_notify"); crm_element_value_int(msg, F_STONITH_RC, &(event->result)); if (safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) { event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION); if (data) { event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN); event->action = crm_element_value_copy(data, F_STONITH_ACTION); event->target = crm_element_value_copy(data, F_STONITH_TARGET); event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE); event->id = crm_element_value_copy(data, F_STONITH_REMOTE_OP_ID); event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME); event->device = crm_element_value_copy(data, F_STONITH_DEVICE); } else { crm_err("No data for %s event", ntype); crm_log_xml_notice(msg, "BadEvent"); } } free(data_addr); return event; } static void event_free(stonith_event_t * event) { free(event->id); free(event->type); free(event->message); free(event->operation); free(event->origin); free(event->action); free(event->target); free(event->executioner); free(event->device); free(event->client_origin); free(event); } static void stonith_send_notification(gpointer data, gpointer user_data) { struct notify_blob_s *blob = user_data; stonith_notify_client_t *entry = data; stonith_event_t *st_event = NULL; const char *event = NULL; if (blob->xml == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(blob->xml, F_SUBTYPE); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->delete) { crm_trace("Skipping callback - marked for deletion"); return; } else if (entry->notify == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (safe_str_neq(entry->event, event)) { crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event); return; } st_event = xml_to_event(blob->xml); crm_trace("Invoking callback for %p/%s event...", entry, event); entry->notify(blob->stonith, st_event); crm_trace("Callback invoked..."); event_free(st_event); } /*! 
* \internal * \brief Create and send an API request * * \param[in] stonith Stonith connection * \param[in] op API operation to request * \param[in] data Data to attach to request * \param[out] output_data If not NULL, will be set to reply if synchronous * \param[in] call_options Bitmask of stonith_call_options to use * \param[in] timeout Error if not completed within this many seconds * * \return pcmk_ok (for synchronous requests) or positive call ID * (for asynchronous requests) on success, -errno otherwise */ static int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout) { int rc = 0; int reply_id = -1; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; stonith_private_t *native = NULL; CRM_ASSERT(stonith && stonith->st_private && op); native = stonith->st_private; if (output_data != NULL) { *output_data = NULL; } if ((stonith->state == stonith_disconnected) || (native->token == NULL)) { return -ENOTCONN; } /* Increment the call ID, which must be positive to avoid conflicting with * error codes. This shouldn't be a problem unless the client mucked with * it or the counter wrapped around. */ stonith->call_id++; if (stonith->call_id < 1) { stonith->call_id = 1; } op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options); if (op_msg == NULL) { return -EINVAL; } crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout); crm_trace("Sending %s message to fencer with timeout %ds", op, timeout); if (data) { const char *delay_s = crm_element_value(data, F_STONITH_DELAY); if (delay_s) { crm_xml_add(op_msg, F_STONITH_DELAY, delay_s); } } { enum crm_ipc_flags ipc_flags = crm_ipc_flags_none; if (call_options & st_opt_sync_call) { ipc_flags |= crm_ipc_client_response; } rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply); } free_xml(op_msg); if (rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & st_opt_sync_call)) { crm_trace("Async call %d, returning", stonith->call_id); free_xml(op_reply); return stonith->call_id; } rc = pcmk_ok; crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id); if (reply_id == stonith->call_id) { crm_trace("Synchronous reply %d received", reply_id); if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) { rc = -ENOMSG; } if ((call_options & st_opt_discard_reply) || output_data == NULL) { crm_trace("Discarding reply"); } else { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); free_xml(op_reply); rc = -ENOMSG; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id); crm_log_xml_err(op_reply, "Old reply"); free_xml(op_reply); rc = -ENOMSG; } done: if (crm_ipc_connected(native->ipc) == FALSE) { crm_err("Fencer disconnected"); free(native->token); native->token = NULL; stonith->state = stonith_disconnected; } free_xml(op_reply); return rc; } /* Not used with mainloop */ bool stonith_dispatch(stonith_t * st) { gboolean stay_connected = TRUE; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->st_private; while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); stonith_dispatch_internal(msg, strlen(msg), st); } if (crm_ipc_connected(private->ipc) == FALSE) { 
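            /* The IPC connection dropped while we were dispatching; report it
             * and let the caller decide whether to clean up and reconnect.
             */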
crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } static int stonith_api_free(stonith_t * stonith) { int rc = pcmk_ok; crm_trace("Destroying %p", stonith); if (stonith->state != stonith_disconnected) { crm_trace("Disconnecting %p first", stonith); rc = stonith->cmds->disconnect(stonith); } if (stonith->state == stonith_disconnected) { stonith_private_t *private = stonith->st_private; crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table)); g_hash_table_destroy(private->stonith_op_callback_table); crm_trace("Destroying %d notification clients", g_list_length(private->notify_list)); g_list_free_full(private->notify_list, free); free(stonith->st_private); free(stonith->cmds); free(stonith); } else { crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc); } return rc; } void stonith_api_delete(stonith_t * stonith) { crm_trace("Destroying %p", stonith); if(stonith) { stonith->cmds->free(stonith); } } static int stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, stonith_key_value_t *params, int timeout, char **output, char **error_output) { /* Validation should be done directly via the agent, so we can get it from * stonith_admin when the cluster is not running, which is important for * higher-level tools. */ int rc = pcmk_ok; /* Use a dummy node name in case the agent requires a target. We assume the * actual target doesn't matter for validation purposes (if in practice, * that is incorrect, we will need to allow the caller to pass the target). */ const char *target = "node1"; GHashTable *params_table = crm_str_table_new(); // Convert parameter list to a hash table for (; params; params = params->next) { // Strip out Pacemaker-implemented parameters if (!pcmk__starts_with(params->key, "pcmk_") && strcmp(params->key, "provides") && strcmp(params->key, "stonith-timeout")) { g_hash_table_insert(params_table, strdup(params->key), strdup(params->value)); } } #if SUPPORT_CIBSECRETS rc = pcmk__substitute_secrets(rsc_id, params_table); if (rc != pcmk_rc_ok) { crm_warn("Could not replace secret parameters for validation of %s: %s", agent, pcmk_rc_str(rc)); rc = pcmk_rc2legacy(rc); } #endif if (output) { *output = NULL; } if (error_output) { *error_output = NULL; } switch (stonith_get_namespace(agent, namespace_s)) { case st_namespace_rhcs: rc = stonith__rhcs_validate(st, call_options, target, agent, params_table, timeout, output, error_output); break; #if HAVE_STONITH_STONITH_H case st_namespace_lha: rc = stonith__lha_validate(st, call_options, target, agent, params_table, timeout, output, error_output); break; #endif default: rc = -EINVAL; errno = EINVAL; crm_perror(LOG_ERR, "Agent %s not found or does not support validation", agent); break; } g_hash_table_destroy(params_table); return rc; } stonith_t * stonith_api_new(void) { stonith_t *new_stonith = NULL; stonith_private_t *private = NULL; new_stonith = calloc(1, sizeof(stonith_t)); if (new_stonith == NULL) { return NULL; } private = calloc(1, sizeof(stonith_private_t)); if (private == NULL) { free(new_stonith); return NULL; } new_stonith->st_private = private; private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); private->notify_list = NULL; private->notify_refcnt = 0; private->notify_deletes = FALSE; new_stonith->call_id = 1; new_stonith->state = stonith_disconnected; new_stonith->cmds = calloc(1, 
sizeof(stonith_api_operations_t)); if (new_stonith->cmds == NULL) { free(new_stonith->st_private); free(new_stonith); return NULL; } /* *INDENT-OFF* */ new_stonith->cmds->free = stonith_api_free; new_stonith->cmds->connect = stonith_api_signon; new_stonith->cmds->disconnect = stonith_api_signoff; new_stonith->cmds->list = stonith_api_list; new_stonith->cmds->monitor = stonith_api_monitor; new_stonith->cmds->status = stonith_api_status; new_stonith->cmds->fence = stonith_api_fence; new_stonith->cmds->fence_with_delay = stonith_api_fence_with_delay; new_stonith->cmds->confirm = stonith_api_confirm; new_stonith->cmds->history = stonith_api_history; new_stonith->cmds->list_agents = stonith_api_device_list; new_stonith->cmds->metadata = stonith_api_device_metadata; new_stonith->cmds->query = stonith_api_query; new_stonith->cmds->remove_device = stonith_api_remove_device; new_stonith->cmds->register_device = stonith_api_register_device; new_stonith->cmds->remove_level = stonith_api_remove_level; new_stonith->cmds->remove_level_full = stonith_api_remove_level_full; new_stonith->cmds->register_level = stonith_api_register_level; new_stonith->cmds->register_level_full = stonith_api_register_level_full; new_stonith->cmds->remove_callback = stonith_api_del_callback; new_stonith->cmds->register_callback = stonith_api_add_callback; new_stonith->cmds->remove_notification = stonith_api_del_notification; new_stonith->cmds->register_notification = stonith_api_add_notification; new_stonith->cmds->validate = stonith_api_validate; /* *INDENT-ON* */ return new_stonith; } /*! * \brief Make a blocking connection attempt to the fencer * * \param[in,out] st Fencer API object * \param[in] name Client name to use with fencer * \param[in] max_attempts Return error if this many attempts fail * * \return pcmk_ok on success, result of last attempt otherwise */ int stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts) { int rc = -EINVAL; // if max_attempts is not positive for (int attempt = 1; attempt <= max_attempts; attempt++) { rc = st->cmds->connect(st, name, NULL); if (rc == pcmk_ok) { return pcmk_ok; } else if (attempt < max_attempts) { crm_notice("Fencer connection attempt %d of %d failed (retrying in 2s): %s " CRM_XS " rc=%d", attempt, max_attempts, pcmk_strerror(rc), rc); sleep(2); } } crm_notice("Could not connect to fencer: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); return rc; } stonith_key_value_t * stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value) { stonith_key_value_t *p, *end; p = calloc(1, sizeof(stonith_key_value_t)); if (key) { p->key = strdup(key); } if (value) { p->value = strdup(value); } end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values) { stonith_key_value_t *p; while (head) { p = head->next; if (keys) { free(head->key); } if (values) { free(head->value); } free(head); head = p; } } #define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON) #define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __FUNCTION__, args) int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off) { int rc = pcmk_ok; stonith_t *st = stonith_api_new(); const char *action = off? 
"off" : "reboot"; api_log_open(); if (st == NULL) { api_log(LOG_ERR, "API initialization failed, could not kick (%s) node %u/%s", action, nodeid, uname); return -EPROTO; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { char *name = NULL; enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide; if (uname != NULL) { name = strdup(uname); } else if (nodeid > 0) { opts |= st_opt_cs_nodeid; name = crm_itoa(nodeid); } rc = st->cmds->fence(st, opts, name, action, timeout, 0); free(name); if (rc != pcmk_ok) { api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { api_log(LOG_NOTICE, "Node %u/%s kicked: %s", nodeid, uname, action); } } stonith_api_delete(st); return rc; } time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress) { int rc = pcmk_ok; time_t when = 0; stonith_t *st = stonith_api_new(); stonith_history_t *history = NULL, *hp = NULL; if (st == NULL) { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: " "API initialization failed", nodeid, uname); return when; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc); } else { int entries = 0; int progress = 0; int completed = 0; char *name = NULL; enum stonith_call_options opts = st_opt_sync_call; if (uname != NULL) { name = strdup(uname); } else if (nodeid > 0) { opts |= st_opt_cs_nodeid; name = crm_itoa(nodeid); } rc = st->cmds->history(st, opts, name, &history, 120); free(name); for (hp = history; hp; hp = hp->next) { entries++; if (in_progress) { progress++; if (hp->state != st_done && hp->state != st_failed) { when = time(NULL); } } else if (hp->state == st_done) { completed++; if (hp->completed > when) { when = hp->completed; } } } stonith_history_free(history); if(rc == pcmk_ok) { api_log(LOG_INFO, "Found %d entries for %u/%s: %d in progress, %d completed", entries, nodeid, uname, progress, completed); } else { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: %s (%d)", nodeid, uname, pcmk_strerror(rc), rc); } } stonith_api_delete(st); if(when) { api_log(LOG_INFO, "Node %u/%s last kicked at: %ld", nodeid, uname, (long int)when); } return when; } bool stonith_agent_exists(const char *agent, int timeout) { stonith_t *st = NULL; stonith_key_value_t *devices = NULL; stonith_key_value_t *dIter = NULL; bool rc = FALSE; if (agent == NULL) { return rc; } st = stonith_api_new(); if (st == NULL) { crm_err("Could not list fence agents: API memory allocation failed"); return FALSE; } st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout == 0 ? 120 : timeout); for (dIter = devices; dIter != NULL; dIter = dIter->next) { if (crm_str_eq(dIter->value, agent, TRUE)) { rc = TRUE; break; } } stonith_key_value_freeall(devices, 1, 1); stonith_api_delete(st); return rc; } const char * stonith_action_str(const char *action) { if (action == NULL) { return "fencing"; } else if (!strcmp(action, "on")) { return "unfencing"; } else if (!strcmp(action, "off")) { return "turning off"; } else { return action; } } /*! 
 * \internal
 * \brief Parse a target name from one line of a target list string
 *
 * \param[in]     line    One line of a target list string
 * \param[in]     len     String length of line
 * \param[in,out] output  List to add newly allocated target name to
 */
static void
parse_list_line(const char *line, int len, GList **output)
{
    size_t i = 0;
    size_t entry_start = 0;

    /* Skip complaints about additional parameters device doesn't understand
     *
     * @TODO Document or eliminate the implied restriction of target names
     */
    if (strstr(line, "invalid") || strstr(line, "variable")) {
        crm_debug("Skipping list output line: %s", line);
        return;
    }

    // Process line content, character by character
    for (i = 0; i <= len; i++) {
        if (isspace(line[i]) || (line[i] == ',') || (line[i] == ';')
            || (line[i] == '\0')) {
            // We've found a separator (i.e. the end of an entry)
            int rc = 0;
            char *entry = NULL;

            if (i == entry_start) {
                // Skip leading and sequential separators
                entry_start = i + 1;
                continue;
            }
            entry = calloc(i - entry_start + 1, sizeof(char));
            CRM_ASSERT(entry != NULL);

            /* Read entry, stopping at first separator
             *
             * @TODO Document or eliminate these character restrictions
             */
            rc = sscanf(line + entry_start, "%[a-zA-Z0-9_-.]", entry);
            if (rc != 1) {
                crm_warn("Could not parse list output entry: %s "
                         CRM_XS " entry_start=%d position=%d",
                         line + entry_start, entry_start, i);
                free(entry);

            } else if (safe_str_eq(entry, "on") || safe_str_eq(entry, "off")) {
                /* Some agents print the target status in the list output,
                 * though none are known now (the separate list-status command
                 * is used for this, but it can also print "UNKNOWN"). To handle
                 * this possibility, skip such entries.
                 *
                 * @TODO Document or eliminate the implied restriction of target
                 * names.
                 */
                free(entry);

            } else {
                // We have a valid entry
                *output = g_list_append(*output, entry);
            }
            entry_start = i + 1;
        }
    }
}

/*!
 * \internal
 * \brief Parse a list of targets from a string
 *
 * \param[in] target_spec  Target list as a string
 *
 * \return List of target names
 * \note The target list string format is flexible, to allow for user-specified
 *       lists such as pcmk_host_list and the output of an agent's list action
 *       (whether direct or via the API, which escapes newlines). There may be
 *       multiple lines, separated by either a newline or an escaped newline
 *       (backslash n). Each line may have one or more target names, separated
 *       by any combination of whitespace, commas, and semi-colons. Lines
 *       containing "invalid" or "variable" will be ignored entirely. Target
 *       names "on" or "off" (case-insensitive) will be ignored. Target names
 *       may contain only alphanumeric characters, underbars (_), dashes (-),
 *       and dots (.) (if any other character occurs in the name, it and all
 *       subsequent characters in the name will be ignored).
 * \note The caller is responsible for freeing the result with
 *       g_list_free_full(result, free).
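 *
 * \par Example
 *       An illustrative sketch only (the input below is hypothetical, not
 *       taken from any agent's output):
 * \code
 * GList *targets = stonith__parse_targets("node1 node2,node3;node_4\nnode5");
 * // targets now holds "node1", "node2", "node3", "node_4", and "node5"
 * g_list_free_full(targets, free);
 * \endcode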
*/ GList * stonith__parse_targets(const char *target_spec) { GList *targets = NULL; if (target_spec != NULL) { size_t out_len = strlen(target_spec); size_t line_start = 0; // Starting index of line being processed for (size_t i = 0; i <= out_len; ++i) { if ((target_spec[i] == '\n') || (target_spec[i] == '\0') || ((target_spec[i] == '\\') && (target_spec[i + 1] == 'n'))) { // We've reached the end of one line of output int len = i - line_start; if (len > 0) { char *line = strndup(target_spec + line_start, len); line[len] = '\0'; // Because it might be a newline parse_list_line(line, len, &targets); free(line); } if (target_spec[i] == '\\') { ++i; // backslash-n takes up two positions } line_start = i + 1; } } } return targets; } /*! * \internal * \brief Determine if a later stonith event succeeded. * * \note Before calling this function, use stonith__sort_history() to sort the * top_history argument. */ gboolean stonith__later_succeeded(stonith_history_t *event, stonith_history_t *top_history) { gboolean ret = FALSE; for (stonith_history_t *prev_hp = top_history; prev_hp; prev_hp = prev_hp->next) { if (prev_hp == event) { break; } if ((prev_hp->state == st_done) && safe_str_eq(event->target, prev_hp->target) && safe_str_eq(event->action, prev_hp->action) && safe_str_eq(event->delegate, prev_hp->delegate) && (event->completed < prev_hp->completed)) { ret = TRUE; break; } } return ret; } /*! * \internal * \brief Sort the stonith-history * sort by competed most current on the top * pending actions lacking a completed-stamp are gathered at the top * * \param[in] history List of stonith actions * */ stonith_history_t * stonith__sort_history(stonith_history_t *history) { stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp; for (hp = history; hp; ) { tmp = hp->next; if ((hp->state == st_done) || (hp->state == st_failed)) { /* sort into new */ if ((!new) || (hp->completed > new->completed)) { hp->next = new; new = hp; } else { np = new; do { if ((!np->next) || (hp->completed > np->next->completed)) { hp->next = np->next; np->next = hp; break; } np = np->next; } while (1); } } else { /* put into pending */ hp->next = pending; pending = hp; } hp = tmp; } /* pending actions don't have a completed-stamp so make them go front */ if (pending) { stonith_history_t *last_pending = pending; while (last_pending->next) { last_pending = last_pending->next; } last_pending->next = new; new = pending; } return new; } // Deprecated functions kept only for backward API compatibility const char *get_stonith_provider(const char *agent, const char *provider); /*! * \brief Deprecated (use stonith_get_namespace() instead) */ const char * get_stonith_provider(const char *agent, const char *provider) { return stonith_namespace2text(stonith_get_namespace(agent, provider)); } diff --git a/lib/fencing/st_lha.c b/lib/fencing/st_lha.c index 26a8dba1b4..8ae2276aba 100644 --- a/lib/fencing/st_lha.c +++ b/lib/fencing/st_lha.c @@ -1,258 +1,281 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include +#include #include #include #include #include #include #include #define LHA_STONITH_LIBRARY "libstonith.so.1" static void *lha_agents_lib = NULL; static const char META_TEMPLATE[] = "\n" "\n" "\n" " 1.0\n" " \n" "%s\n" " \n" " %s\n" "%s\n" " \n" " \n" " \n" " \n" " \n" " \n" " \n" " \n" " 2.0\n" " \n" "\n"; +static void * +find_library_function(void **handle, const char *lib, const char *fn) +{ + void *a_function; + + if (*handle == NULL) { + *handle = dlopen(lib, RTLD_LAZY); + if ((*handle) == NULL) { + crm_err("Could not open %s: %s", lib, dlerror()); + return NULL; + } + } + + a_function = dlsym(*handle, fn); + if (a_function == NULL) { + crm_err("Could not find %s in %s: %s", fn, lib, dlerror()); + } + + return a_function; +} + /*! * \brief Determine namespace of a fence agent * * \param[in] agent Fence agent type * \param[in] namespace_s Name of agent namespace as string, if known * * \return Namespace of specified agent, as enum value */ bool stonith__agent_is_lha(const char *agent) { Stonith *stonith_obj = NULL; static gboolean need_init = TRUE; static Stonith *(*st_new_fn) (const char *) = NULL; static void (*st_del_fn) (Stonith *) = NULL; if (need_init) { need_init = FALSE; st_new_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, - "stonith_new", FALSE); + "stonith_new"); st_del_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, - "stonith_delete", FALSE); + "stonith_delete"); } if (lha_agents_lib && st_new_fn && st_del_fn) { stonith_obj = (*st_new_fn) (agent); if (stonith_obj) { (*st_del_fn) (stonith_obj); return TRUE; } } return FALSE; } int stonith__list_lha_agents(stonith_key_value_t **devices) { static gboolean need_init = TRUE; int count = 0; char **entry = NULL; char **type_list = NULL; static char **(*type_list_fn) (void) = NULL; static void (*type_free_fn) (char **) = NULL; if (need_init) { need_init = FALSE; - type_list_fn = - find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_types", FALSE); - type_free_fn = - find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_free_hostlist", - FALSE); + type_list_fn = find_library_function(&lha_agents_lib, + LHA_STONITH_LIBRARY, + "stonith_types"); + type_free_fn = find_library_function(&lha_agents_lib, + LHA_STONITH_LIBRARY, + "stonith_free_hostlist"); } if (type_list_fn) { type_list = (*type_list_fn) (); } for (entry = type_list; entry != NULL && *entry; ++entry) { crm_trace("Added: %s", *entry); *devices = stonith_key_value_add(*devices, NULL, *entry); count++; } if (type_list && type_free_fn) { (*type_free_fn) (type_list); } return count; } static inline char * strdup_null(const char *val) { if (val) { return strdup(val); } return NULL; } static void stonith_plugin(int priority, const char *fmt, ...) __attribute__((__format__ (__printf__, 2, 3))); static void stonith_plugin(int priority, const char *format, ...) 
{ int err = errno; va_list ap; int len = 0; char *string = NULL; va_start(ap, format); len = vasprintf (&string, format, ap); va_end(ap); CRM_ASSERT(len > 0); do_crm_log_alias(priority, __FILE__, __func__, __LINE__, "%s", string); free(string); errno = err; } int stonith__lha_metadata(const char *agent, int timeout, char **output) { int rc = 0; char *buffer = NULL; static const char *no_parameter_info = ""; Stonith *stonith_obj = NULL; static gboolean need_init = TRUE; static Stonith *(*st_new_fn) (const char *) = NULL; static const char *(*st_info_fn) (Stonith *, int) = NULL; static void (*st_del_fn) (Stonith *) = NULL; static void (*st_log_fn) (Stonith *, PILLogFun) = NULL; if (need_init) { need_init = FALSE; st_new_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, - "stonith_new", FALSE); + "stonith_new"); st_del_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, - "stonith_delete", FALSE); + "stonith_delete"); st_log_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, - "stonith_set_log", FALSE); + "stonith_set_log"); st_info_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, - "stonith_get_info", FALSE); + "stonith_get_info"); } if (lha_agents_lib && st_new_fn && st_del_fn && st_info_fn && st_log_fn) { char *xml_meta_longdesc = NULL; char *xml_meta_shortdesc = NULL; char *meta_param = NULL; char *meta_longdesc = NULL; char *meta_shortdesc = NULL; stonith_obj = (*st_new_fn) (agent); if (stonith_obj) { (*st_log_fn) (stonith_obj, (PILLogFun) & stonith_plugin); meta_longdesc = strdup_null((*st_info_fn) (stonith_obj, ST_DEVICEDESCR)); if (meta_longdesc == NULL) { crm_warn("no long description in %s's metadata.", agent); meta_longdesc = strdup(no_parameter_info); } meta_shortdesc = strdup_null((*st_info_fn) (stonith_obj, ST_DEVICEID)); if (meta_shortdesc == NULL) { crm_warn("no short description in %s's metadata.", agent); meta_shortdesc = strdup(no_parameter_info); } meta_param = strdup_null((*st_info_fn) (stonith_obj, ST_CONF_XML)); if (meta_param == NULL) { crm_warn("no list of parameters in %s's metadata.", agent); meta_param = strdup(no_parameter_info); } (*st_del_fn) (stonith_obj); } else { errno = EINVAL; crm_perror(LOG_ERR, "Agent %s not found", agent); return -EINVAL; } xml_meta_longdesc = (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_longdesc); xml_meta_shortdesc = (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_shortdesc); buffer = crm_strdup_printf(META_TEMPLATE, agent, xml_meta_longdesc, xml_meta_shortdesc, meta_param); xmlFree(xml_meta_longdesc); xmlFree(xml_meta_shortdesc); free(meta_shortdesc); free(meta_longdesc); free(meta_param); } if (output) { *output = buffer; } else { free(buffer); } return rc; } /* Implement a dummy function that uses -lpils so that linkers don't drop the * reference. */ #include const char *i_hate_pils(int rc); const char * i_hate_pils(int rc) { return PIL_strerror(rc); } int stonith__lha_validate(stonith_t *st, int call_options, const char *target, const char *agent, GHashTable *params, int timeout, char **output, char **error_output) { errno = EOPNOTSUPP; crm_perror(LOG_ERR, "Cannot validate Linux-HA fence agents"); return -EOPNOTSUPP; } diff --git a/lib/lrmd/proxy_common.c b/lib/lrmd/proxy_common.c index cd472510f7..75874bbe57 100644 --- a/lib/lrmd/proxy_common.c +++ b/lib/lrmd/proxy_common.c @@ -1,311 +1,312 @@ /* * Copyright 2015-2020 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include +#include int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); GHashTable *proxy_table = NULL; static void remote_proxy_notify_destroy(lrmd_t *lrmd, const char *session_id) { /* sending to the remote node that an ipc connection has been destroyed */ xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); crm_xml_add(msg, F_LRMD_IPC_SESSION, session_id); lrmd_internal_proxy_send(lrmd, msg); free_xml(msg); } /*! * \brief Send an acknowledgment of a remote proxy shutdown request. * * \param[in] lrmd Connection to proxy */ void remote_proxy_ack_shutdown(lrmd_t *lrmd) { xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_ACK); lrmd_internal_proxy_send(lrmd, msg); free_xml(msg); } /*! * \brief We're not going to shutdown as response to * a remote proxy shutdown request. * * \param[in] lrmd Connection to proxy */ void remote_proxy_nack_shutdown(lrmd_t *lrmd) { xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_NACK); lrmd_internal_proxy_send(lrmd, msg); free_xml(msg); } void remote_proxy_relay_event(remote_proxy_t *proxy, xmlNode *msg) { /* sending to the remote node an event msg. */ xmlNode *event = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(event, F_LRMD_IPC_OP, LRMD_IPC_OP_EVENT); crm_xml_add(event, F_LRMD_IPC_SESSION, proxy->session_id); add_message_xml(event, F_LRMD_IPC_MSG, msg); crm_log_xml_explicit(event, "EventForProxy"); lrmd_internal_proxy_send(proxy->lrm, event); free_xml(event); } void remote_proxy_relay_response(remote_proxy_t *proxy, xmlNode *msg, int msg_id) { /* sending to the remote node a response msg. 
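     * The reply is wrapped in a T_LRMD_IPC_PROXY message carrying the proxied
     * session ID and the original request's message ID, so the remote client
     * can match it to its request.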
*/ xmlNode *response = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(response, F_LRMD_IPC_OP, LRMD_IPC_OP_RESPONSE); crm_xml_add(response, F_LRMD_IPC_SESSION, proxy->session_id); crm_xml_add_int(response, F_LRMD_IPC_MSG_ID, msg_id); add_message_xml(response, F_LRMD_IPC_MSG, msg); lrmd_internal_proxy_send(proxy->lrm, response); free_xml(response); } static void remote_proxy_end_session(remote_proxy_t *proxy) { if (proxy == NULL) { return; } crm_trace("ending session ID %s", proxy->session_id); if (proxy->source) { mainloop_del_ipc_client(proxy->source); } } void remote_proxy_free(gpointer data) { remote_proxy_t *proxy = data; crm_trace("freed proxy session ID %s", proxy->session_id); free(proxy->node_name); free(proxy->session_id); free(proxy); } int remote_proxy_dispatch(const char *buffer, ssize_t length, gpointer userdata) { // Async responses from cib and friends to clients via pacemaker-remoted xmlNode *xml = NULL; uint32_t flags = 0; remote_proxy_t *proxy = userdata; xml = string2xml(buffer); if (xml == NULL) { crm_warn("Received a NULL msg from IPC service."); return 1; } flags = crm_ipc_buffer_flags(proxy->ipc); if (flags & crm_ipc_proxied_relay_response) { crm_trace("Passing response back to %.8s on %s: %.200s - request id: %d", proxy->session_id, proxy->node_name, buffer, proxy->last_request_id); remote_proxy_relay_response(proxy, xml, proxy->last_request_id); proxy->last_request_id = 0; } else { crm_trace("Passing event back to %.8s on %s: %.200s", proxy->session_id, proxy->node_name, buffer); remote_proxy_relay_event(proxy, xml); } free_xml(xml); return 1; } void remote_proxy_disconnected(gpointer userdata) { remote_proxy_t *proxy = userdata; crm_trace("destroying %p", proxy); proxy->source = NULL; proxy->ipc = NULL; if(proxy->lrm) { remote_proxy_notify_destroy(proxy->lrm, proxy->session_id); proxy->lrm = NULL; } g_hash_table_remove(proxy_table, proxy->session_id); } remote_proxy_t * remote_proxy_new(lrmd_t *lrmd, struct ipc_client_callbacks *proxy_callbacks, const char *node_name, const char *session_id, const char *channel) { remote_proxy_t *proxy = NULL; if(channel == NULL) { crm_err("No channel specified to proxy"); remote_proxy_notify_destroy(lrmd, session_id); return NULL; } proxy = calloc(1, sizeof(remote_proxy_t)); proxy->node_name = strdup(node_name); proxy->session_id = strdup(session_id); proxy->lrm = lrmd; if (!strcmp(pcmk_message_name(crm_system_name), CRM_SYSTEM_CRMD) && !strcmp(pcmk_message_name(channel), CRM_SYSTEM_CRMD)) { // The controller doesn't need to connect to itself proxy->is_local = TRUE; } else { proxy->source = mainloop_add_ipc_client(channel, G_PRIORITY_LOW, 0, proxy, proxy_callbacks); proxy->ipc = mainloop_get_ipc_client(proxy->source); if (proxy->source == NULL) { remote_proxy_free(proxy); remote_proxy_notify_destroy(lrmd, session_id); return NULL; } } crm_trace("new remote proxy client established to %s on %s, session id %s", channel, node_name, session_id); g_hash_table_insert(proxy_table, proxy->session_id, proxy); return proxy; } void remote_proxy_cb(lrmd_t *lrmd, const char *node_name, xmlNode *msg) { const char *op = crm_element_value(msg, F_LRMD_IPC_OP); const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); int msg_id = 0; /* sessions are raw ipc connections to IPC, * all we do is proxy requests/responses exactly * like they are given to us at the ipc level. 
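     * The F_LRMD_IPC_SESSION value identifies which proxied IPC connection
     * (tracked in proxy_table) the operation applies to.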
*/ CRM_CHECK(op != NULL, return); CRM_CHECK(session != NULL, return); crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); /* This is msg from remote ipc client going to real ipc server */ if (safe_str_eq(op, LRMD_IPC_OP_DESTROY)) { remote_proxy_end_session(proxy); } else if (safe_str_eq(op, LRMD_IPC_OP_REQUEST)) { int flags = 0; xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); const char *name = crm_element_value(msg, F_LRMD_IPC_CLIENT); CRM_CHECK(request != NULL, return); if (proxy == NULL) { /* proxy connection no longer exists */ remote_proxy_notify_destroy(lrmd, session); return; } // Controller requests MUST be handled by the controller, not us CRM_CHECK(proxy->is_local == FALSE, remote_proxy_end_session(proxy); return); if (crm_ipc_connected(proxy->ipc) == FALSE) { remote_proxy_end_session(proxy); return; } proxy->last_request_id = 0; crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags); crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote"); #if ENABLE_ACL CRM_ASSERT(node_name); pcmk__update_acl_user(request, F_LRMD_IPC_USER, node_name); #endif if(is_set(flags, crm_ipc_proxied)) { const char *type = crm_element_value(request, F_TYPE); int rc = 0; if (safe_str_eq(type, T_ATTRD) && crm_element_value(request, PCMK__XA_ATTR_NODE_NAME) == NULL) { crm_xml_add(request, PCMK__XA_ATTR_NODE_NAME, proxy->node_name); } rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); if(rc < 0) { xmlNode *op_reply = create_xml_node(NULL, "nack"); crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); /* Send a n'ack so the caller doesn't block */ crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); crm_xml_add_int(op_reply, "rc", rc); remote_proxy_relay_response(proxy, op_reply, msg_id); free_xml(op_reply); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); proxy->last_request_id = msg_id; } } else { int rc = pcmk_ok; xmlNode *op_reply = NULL; // @COMPAT pacemaker_remoted <= 1.1.10 crm_trace("Relaying %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); rc = crm_ipc_send(proxy->ipc, request, flags, 10000, &op_reply); if(rc < 0) { crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); } if(op_reply) { remote_proxy_relay_response(proxy, op_reply, msg_id); free_xml(op_reply); } } } else { crm_err("Unknown proxy operation: %s", op); } } diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c index 156f9668a5..2c227672c8 100644 --- a/lib/pacemaker/pcmk_sched_allocate.c +++ b/lib/pacemaker/pcmk_sched_allocate.c @@ -1,3068 +1,3068 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pacemaker); void set_alloc_actions(pe_working_set_t * data_set); extern void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set); extern gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set); static void apply_remote_node_ordering(pe_working_set_t *data_set); static enum remote_connection_state get_remote_node_state(pe_node_t *node); enum remote_connection_state { remote_state_unknown = 0, remote_state_alive = 1, remote_state_resting = 2, remote_state_failed = 3, remote_state_stopped = 4 }; static const char * state2text(enum remote_connection_state state) { switch (state) { case remote_state_unknown: return "unknown"; case remote_state_alive: return "alive"; case remote_state_resting: return "resting"; case remote_state_failed: return "failed"; case remote_state_stopped: return "stopped"; } return "impossible"; } resource_alloc_functions_t resource_class_alloc_functions[] = { { pcmk__native_merge_weights, pcmk__native_allocate, native_create_actions, native_create_probe, native_internal_constraints, native_rsc_colocation_lh, native_rsc_colocation_rh, native_rsc_location, native_action_flags, native_update_actions, native_expand, native_append_meta, }, { pcmk__group_merge_weights, pcmk__group_allocate, group_create_actions, native_create_probe, group_internal_constraints, group_rsc_colocation_lh, group_rsc_colocation_rh, group_rsc_location, group_action_flags, group_update_actions, group_expand, group_append_meta, }, { pcmk__native_merge_weights, pcmk__clone_allocate, clone_create_actions, clone_create_probe, clone_internal_constraints, clone_rsc_colocation_lh, clone_rsc_colocation_rh, clone_rsc_location, clone_action_flags, pcmk__multi_update_actions, clone_expand, clone_append_meta, }, { pcmk__native_merge_weights, pcmk__bundle_allocate, pcmk__bundle_create_actions, pcmk__bundle_create_probe, pcmk__bundle_internal_constraints, pcmk__bundle_rsc_colocation_lh, pcmk__bundle_rsc_colocation_rh, pcmk__bundle_rsc_location, pcmk__bundle_action_flags, pcmk__multi_update_actions, pcmk__bundle_expand, pcmk__bundle_append_meta, } }; gboolean update_action_flags(pe_action_t * action, enum pe_action_flags flags, const char *source, int line) { static unsigned long calls = 0; gboolean changed = FALSE; gboolean clear = is_set(flags, pe_action_clear); enum pe_action_flags last = action->flags; if (clear) { action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags); } else { action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags); } if (last != action->flags) { calls++; changed = TRUE; /* Useful for tracking down _who_ changed a specific flag */ /* CRM_ASSERT(calls != 534); */ clear_bit(flags, pe_action_clear); crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)", action->uuid, action->node ? action->node->details->uname : "[none]", clear ? 
"un-" : "", flags, last, action->flags, calls, source); } return changed; } static gboolean check_rsc_parameters(pe_resource_t * rsc, pe_node_t * node, xmlNode * rsc_entry, gboolean active_here, pe_working_set_t * data_set) { int attr_lpc = 0; gboolean force_restart = FALSE; gboolean delete_resource = FALSE; gboolean changed = FALSE; const char *value = NULL; const char *old_value = NULL; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; for (; attr_lpc < DIMOF(attr_list); attr_lpc++) { value = crm_element_value(rsc->xml, attr_list[attr_lpc]); old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]); if (value == old_value /* i.e. NULL */ || crm_str_eq(value, old_value, TRUE)) { continue; } changed = TRUE; trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set); if (active_here) { force_restart = TRUE; crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s", rsc->id, node->details->uname, attr_list[attr_lpc], crm_str(old_value), crm_str(value)); } } if (force_restart) { /* make sure the restart happens */ stop_action(rsc, node, FALSE); set_bit(rsc->flags, pe_rsc_start_pending); delete_resource = TRUE; } else if (changed) { delete_resource = TRUE; } return delete_resource; } static void CancelXmlOp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * active_node, const char *reason, pe_working_set_t * data_set) { guint interval_ms = 0; pe_action_t *cancel = NULL; const char *task = NULL; const char *call_id = NULL; CRM_CHECK(xml_op != NULL, return); CRM_CHECK(active_node != NULL, return); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); crm_info("Action " PCMK__OP_FMT " on %s will be stopped: %s", rsc->id, task, interval_ms, active_node->details->uname, (reason? 
reason : "unknown")); cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set); add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id); custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set); } static gboolean check_action_definition(pe_resource_t * rsc, pe_node_t * active_node, xmlNode * xml_op, pe_working_set_t * data_set) { char *key = NULL; guint interval_ms = 0; const op_digest_cache_t *digest_data = NULL; gboolean did_change = FALSE; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_secure = NULL; CRM_CHECK(active_node != NULL, return FALSE); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if (interval_ms > 0) { xmlNode *op_match = NULL; /* we need to reconstruct the key because of the way we used to construct resource IDs */ key = pcmk__op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Checking parameters for %s", key); op_match = find_rsc_op_entry(rsc, key); if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) { CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set); free(key); return TRUE; } else if (op_match == NULL) { pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname); free(key); return TRUE; } free(key); key = NULL; } crm_trace("Testing " PCMK__OP_FMT " on %s", rsc->id, task, interval_ms, active_node->details->uname); if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) { /* Reload based on the start action not a probe */ task = RSC_START; } else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) { /* Reload based on the start action not a migrate */ task = RSC_START; } else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) { /* Reload based on the start action not a promote */ task = RSC_START; } digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set); if(is_set(data_set->flags, pe_flag_sanitized)) { digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST); } if(digest_data->rc != RSC_DIGEST_MATCH && digest_secure && digest_data->digest_secure_calc && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) { if (is_set(data_set->flags, pe_flag_stdout)) { printf("Only 'private' parameters to " PCMK__OP_FMT " on %s changed: %s\n", rsc->id, task, interval_ms, active_node->details->uname, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); } } else if (digest_data->rc == RSC_DIGEST_RESTART) { /* Changes that force a restart */ pe_action_t *required = NULL; did_change = TRUE; key = pcmk__op_key(rsc->id, task, interval_ms); crm_log_xml_info(digest_data->params_restart, "params:restart"); required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set); pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL, "resource definition change", pe_action_optional, TRUE); trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set); } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) { /* Changes that can potentially be handled by a reload */ const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); did_change = TRUE; trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set); crm_log_xml_info(digest_data->params_all, "params:reload"); key = pcmk__op_key(rsc->id, task, interval_ms); if (interval_ms > 0) { pe_action_t *op = NULL; #if 0 /* Always reload/restart the entire resource */ ReloadRsc(rsc, active_node, data_set); #else /* 
Re-sending the recurring op is sufficient - the old one will be cancelled automatically */ op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_reschedule); #endif } else if (digest_restart) { pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id); /* Reload this resource */ ReloadRsc(rsc, active_node, data_set); free(key); } else { pe_action_t *required = NULL; pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id); /* Re-send the start/demote/promote op * Recurring ops will be detected independently */ required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set); pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL, "resource definition change", pe_action_optional, TRUE); } } return did_change; } /*! * \internal * \brief Do deferred action checks after allocation * * \param[in] data_set Working set for cluster */ static void check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op, enum pe_check_parameters check, pe_working_set_t *data_set) { const char *reason = NULL; op_digest_cache_t *digest_data = NULL; switch (check) { case pe_check_active: if (check_action_definition(rsc, node, rsc_op, data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { reason = "action definition changed"; } break; case pe_check_last_failure: digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s has no digest to compare", rsc->id, ID(rsc_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: reason = "resource parameters have changed"; break; } break; } if (reason) { pe__clear_failcount(rsc, node, reason, data_set); } } static void check_actions_for(xmlNode * rsc_entry, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { GListPtr gIter = NULL; int offset = -1; int stop_index = 0; int start_index = 0; const char *task = NULL; xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; CRM_CHECK(node != NULL, return); if (is_set(rsc->flags, pe_rsc_orphan)) { pe_resource_t *parent = uber_parent(rsc); if(parent == NULL || pe_rsc_is_clone(parent) == FALSE || is_set(parent->flags, pe_rsc_unique)) { pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id); DeleteRsc(rsc, node, FALSE, data_set); } else { pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id); } return; } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) { if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s", rsc->id, node->details->uname); return; } pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname); if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) { DeleteRsc(rsc, node, FALSE, data_set); } for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; offset++; if (start_index < stop_index) { /* stopped */ continue; } else if 
(offset < start_index) { /* action occurred prior to a start */ continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms > 0) && (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { // Maintenance mode cancels recurring operations CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); } else if ((interval_ms > 0) || safe_str_eq(task, RSC_STATUS) || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || safe_str_eq(task, RSC_MIGRATED)) { /* If a resource operation failed, and the operation's definition * has changed, clear any fail count so they can be retried fresh. */ if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources to nodes yet, so if the * REMOTE_CONTAINER_HACK is used, we may calculate the digest * based on the literal "#uname" value rather than the properly * substituted value. That would mistakenly make the action * definition appear to have been changed. Defer the check until * later in this case. */ pe__add_param_check(rsc_op, rsc, node, pe_check_active, data_set); } else if (check_action_definition(rsc, node, rsc_op, data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { pe__clear_failcount(rsc, node, "action definition changed", data_set); } } } g_list_free(sorted_op_list); } static GListPtr find_rsc_list(GListPtr result, pe_resource_t * rsc, const char *id, gboolean renamed_clones, gboolean partial, pe_working_set_t * data_set) { GListPtr gIter = NULL; gboolean match = FALSE; if (id == NULL) { return NULL; } if (rsc == NULL) { if (data_set == NULL) { return NULL; } for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } return result; } if (partial) { if (strstr(rsc->id, id)) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) { match = TRUE; } } else { if (strcmp(rsc->id, id) == 0) { match = TRUE; } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = TRUE; } } if (match) { result = g_list_prepend(result, rsc); } if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; result = find_rsc_list(result, child, id, renamed_clones, partial, NULL); } } return result; } static void check_actions(pe_working_set_t * data_set) { const char *id = NULL; pe_node_t *node = NULL; xmlNode *lrm_rscs = NULL; xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); xmlNode *node_state = NULL; for (node_state = __xml_first_child_element(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { id = crm_element_value(node_state, XML_ATTR_ID); lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE); node = pe_find_node_id(data_set->nodes, id); if (node == NULL) { continue; /* Still need to check actions for a maintenance node to cancel existing monitor operations */ } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) { crm_trace("Skipping param check for %s: can't run resources", node->details->uname); continue; } crm_trace("Processing node %s", node->details->uname); if (node->details->online || 
is_set(data_set->flags, pe_flag_stonith_enabled)) { xmlNode *rsc_entry = NULL; for (rsc_entry = __xml_first_child_element(lrm_rscs); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { if (xml_has_children(rsc_entry)) { GListPtr gIter = NULL; GListPtr result = NULL; const char *rsc_id = ID(rsc_entry); CRM_CHECK(rsc_id != NULL, return); result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set); for (gIter = result; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; if (rsc->variant != pe_native) { continue; } check_actions_for(rsc_entry, rsc, node, data_set); } g_list_free(result); } } } } } } } static void apply_placement_constraints(pe_working_set_t * data_set) { for (GList *gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { pe__location_t *cons = gIter->data; cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons); } } static gboolean failcount_clear_action_exists(pe_node_t * node, pe_resource_t * rsc) { gboolean rc = FALSE; GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE); if (list) { rc = TRUE; } g_list_free(list); return rc; } /*! * \internal * \brief Force resource away if failures hit migration threshold * * \param[in,out] rsc Resource to check for failures * \param[in,out] node Node to check for failures * \param[in,out] data_set Cluster working set to update */ static void check_migration_threshold(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { int fail_count, countdown; pe_resource_t *failed; /* Migration threshold of 0 means never force away */ if (rsc->migration_threshold == 0) { return; } // If we're ignoring failures, also ignore the migration threshold if (is_set(rsc->flags, pe_rsc_failure_ignored)) { return; } /* If there are no failures, there's no need to force away */ fail_count = pe_get_failcount(node, rsc, NULL, pe_fc_effective|pe_fc_fillers, NULL, data_set); if (fail_count <= 0) { return; } /* How many more times recovery will be tried on this node */ countdown = QB_MAX(rsc->migration_threshold - fail_count, 0); /* If failed resource has a parent, we'll force the parent away */ failed = rsc; if (is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(rsc); } if (countdown == 0) { resource_location(failed, node, -INFINITY, "__fail_limit__", data_set); crm_warn("Forcing %s away from %s after %d failures (max=%d)", failed->id, node->details->uname, fail_count, rsc->migration_threshold); } else { crm_info("%s can fail %d more times on %s before being forced off", failed->id, countdown, node->details->uname); } } static void common_apply_stickiness(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; common_apply_stickiness(child_rsc, node, data_set); } return; } if (is_set(rsc->flags, pe_rsc_managed) && rsc->stickiness != 0 && pcmk__list_of_1(rsc->running_on)) { pe_node_t *current = pe_find_node_id(rsc->running_on, node->details->id); pe_node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (current == NULL) { } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) { pe_resource_t *sticky_rsc = rsc; resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set); pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location" 
" (node=%s, weight=%d)", sticky_rsc->id, node->details->uname, rsc->stickiness); } else { GHashTableIter iter; pe_node_t *nIter = NULL; pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric" " and node %s is not explicitly allowed", rsc->id, node->details->uname); g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) { crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight); } } } /* Check the migration threshold only if a failcount clear action * has not already been placed for this resource on the node. * There is no sense in potentially forcing the resource from this * node if the failcount is being reset anyway. * * @TODO A clear_failcount operation can be scheduled in stage4() via * check_actions_for(), or in stage5() via check_params(). This runs in * stage2(), so it cannot detect those, meaning we might check the migration * threshold when we shouldn't -- worst case, we stop or move the resource, * then move it back next transition. */ if (failcount_clear_action_exists(node, rsc) == FALSE) { check_migration_threshold(rsc, node, data_set); } } void complex_set_cmds(pe_resource_t * rsc) { GListPtr gIter = rsc->children; rsc->cmds = &resource_class_alloc_functions[rsc->variant]; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; complex_set_cmds(child_rsc); } } void set_alloc_actions(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; complex_set_cmds(rsc); } } static void calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data) { const char *key = (const char *)gKey; const char *value = (const char *)gValue; int *system_health = (int *)user_data; if (!gKey || !gValue || !user_data) { return; } if (pcmk__starts_with(key, "#health")) { int score; /* Convert the value into an integer */ score = char2score(value); /* Add it to the running total */ *system_health = merge_weights(score, *system_health); } } static gboolean apply_system_health(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy"); int base_health = 0; if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) { /* Prevent any accidental health -> score translation */ - node_score_red = 0; - node_score_yellow = 0; - node_score_green = 0; + pcmk__score_red = 0; + pcmk__score_yellow = 0; + pcmk__score_green = 0; return TRUE; } else if (safe_str_eq(health_strategy, "migrate-on-red")) { /* Resources on nodes which have health values of red are * weighted away from that node. */ - node_score_red = -INFINITY; - node_score_yellow = 0; - node_score_green = 0; + pcmk__score_red = -INFINITY; + pcmk__score_yellow = 0; + pcmk__score_green = 0; } else if (safe_str_eq(health_strategy, "only-green")) { /* Resources on nodes which have health values of red or yellow * are forced away from that node. 
*/ - node_score_red = -INFINITY; - node_score_yellow = -INFINITY; - node_score_green = 0; + pcmk__score_red = -INFINITY; + pcmk__score_yellow = -INFINITY; + pcmk__score_green = 0; } else if (safe_str_eq(health_strategy, "progressive")) { /* Same as the above, but use the r/y/g scores provided by the user * Defaults are provided by the pe_prefs table * Also, custom health "base score" can be used */ base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0"); } else if (safe_str_eq(health_strategy, "custom")) { /* Requires the admin to configure the rsc_location constaints for * processing the stored health scores */ /* TODO: Check for the existence of appropriate node health constraints */ return TRUE; } else { crm_err("Unknown node health strategy: %s", health_strategy); return FALSE; } crm_info("Applying automated node health strategy: %s", health_strategy); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { int system_health = base_health; pe_node_t *node = (pe_node_t *) gIter->data; /* Search through the node hash table for system health entries. */ g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health); crm_info(" Node %s has an combined system health of %d", node->details->uname, system_health); /* If the health is non-zero, then create a new rsc2node so that the * weight will be added later on. */ if (system_health != 0) { GListPtr gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { pe_resource_t *rsc = (pe_resource_t *) gIter2->data; rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set); } } } return TRUE; } gboolean stage0(pe_working_set_t * data_set) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); if (data_set->input == NULL) { return FALSE; } if (is_set(data_set->flags, pe_flag_have_status) == FALSE) { crm_trace("Calculating status"); cluster_status(data_set); } set_alloc_actions(data_set); apply_system_health(data_set); unpack_constraints(cib_constraints, data_set); return TRUE; } /* * Check nodes for resources started outside of the LRM */ gboolean probe_resources(pe_working_set_t * data_set) { pe_action_t *probe_node_complete = NULL; for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED); if (node->details->online == FALSE) { if (pe__is_remote_node(node) && node->details->remote_rsc && (get_remote_node_state(node) == remote_state_failed)) { pe_fence_node(data_set, node, "the connection is unrecoverable", FALSE); } continue; } else if (node->details->unclean) { continue; } else if (node->details->rsc_discovery_enabled == FALSE) { /* resource discovery is disabled for this node */ continue; } if (probed != NULL && crm_is_true(probed) == FALSE) { pe_action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname), CRM_OP_REPROBE, node, FALSE, TRUE, data_set); add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); continue; } for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) { pe_resource_t *rsc = (pe_resource_t *) gIter2->data; rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set); } } return TRUE; } static void rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node) { GListPtr gIter = rsc->children; pe_resource_t *top = uber_parent(rsc); pe_node_t *match; if (rsc->exclusive_discover == 
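 *
 * When shutdown locks are enabled, this is also where shutdown-locked
 * resources are banned from every node other than their lock node.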
FALSE && top->exclusive_discover == FALSE) { return; } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; rsc_discover_filter(child_rsc, node); } match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match && match->rsc_discover_mode != pe_discover_exclusive) { match->weight = -INFINITY; } } static time_t shutdown_time(pe_node_t *node, pe_working_set_t *data_set) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); time_t result = 0; if (shutdown) { errno = 0; result = (time_t) crm_parse_ll(shutdown, NULL); if (errno != 0) { result = 0; } } return result? result : get_effective_time(data_set); } static void apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set) { const char *class; // Only primitives and (uncloned) groups may be locked if (rsc->variant == pe_group) { for (GList *item = rsc->children; item != NULL; item = item->next) { apply_shutdown_lock((pe_resource_t *) item->data, data_set); } } else if (rsc->variant != pe_native) { return; } // Fence devices and remote connections can't be locked class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if ((class == NULL) || !strcmp(class, PCMK_RESOURCE_CLASS_STONITH) || pe__resource_is_remote_conn(rsc, data_set)) { return; } if (rsc->lock_node != NULL) { // The lock was obtained from resource history if (rsc->running_on != NULL) { /* The resource was started elsewhere even though it is now * considered locked. This shouldn't be possible, but as a * failsafe, we don't want to disturb the resource now. */ pe_rsc_info(rsc, "Cancelling shutdown lock because %s is already active", rsc->id); pe__clear_resource_history(rsc, rsc->lock_node, data_set); rsc->lock_node = NULL; rsc->lock_time = 0; } // Only a resource active on exactly one node can be locked } else if (pcmk__list_of_1(rsc->running_on)) { pe_node_t *node = rsc->running_on->data; if (node->details->shutdown) { if (node->details->unclean) { pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown", rsc->id, node->details->uname); } else { rsc->lock_node = node; rsc->lock_time = shutdown_time(node, data_set); } } } if (rsc->lock_node == NULL) { // No lock needed return; } if (data_set->shutdown_lock > 0) { time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock; pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)", rsc->id, rsc->lock_node->details->uname, (long long) lock_expiration); pe__update_recheck_time(++lock_expiration, data_set); } else { pe_rsc_info(rsc, "Locking %s to %s due to shutdown", rsc->id, rsc->lock_node->details->uname); } // If resource is locked to one node, ban it from all other nodes for (GList *item = data_set->nodes; item != NULL; item = item->next) { pe_node_t *node = item->data; if (strcmp(node->details->uname, rsc->lock_node->details->uname)) { resource_location(rsc, node, -CRM_SCORE_INFINITY, XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set); } } } /* * \internal * \brief Stage 2 of cluster status: apply node-specific criteria * * Count known nodes, and apply location constraints, stickiness, and exclusive * resource discovery. 
*/ gboolean stage2(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_shutdown_lock)) { for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { apply_shutdown_lock((pe_resource_t *) gIter->data, data_set); } } if (is_not_set(data_set->flags, pe_flag_no_compat)) { // @COMPAT API backward compatibility for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; if (node && (node->weight >= 0) && node->details->online && (node->details->type != node_ping)) { data_set->max_valid_nodes++; } } } crm_trace("Applying placement constraints"); apply_placement_constraints(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { GListPtr gIter2 = NULL; pe_node_t *node = (pe_node_t *) gIter->data; gIter2 = data_set->resources; for (; gIter2 != NULL; gIter2 = gIter2->next) { pe_resource_t *rsc = (pe_resource_t *) gIter2->data; common_apply_stickiness(rsc, node, data_set); rsc_discover_filter(rsc, node); } } return TRUE; } /* * Create internal resource constraints before allocation */ gboolean stage3(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; rsc->cmds->internal_constraints(rsc, data_set); } return TRUE; } /* * Check for orphaned or redefined actions */ gboolean stage4(pe_working_set_t * data_set) { check_actions(data_set); return TRUE; } static void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } static gint sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data) { int rc = 0; int r1_weight = -INFINITY; int r2_weight = -INFINITY; const char *reason = "existence"; const GListPtr nodes = (GListPtr) data; const pe_resource_t *resource1 = a; const pe_resource_t *resource2 = b; pe_node_t *r1_node = NULL; pe_node_t *r2_node = NULL; GListPtr gIter = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; if (a == NULL && b == NULL) { goto done; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } reason = "priority"; r1_weight = resource1->priority; r2_weight = resource2->priority; if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "no node list"; if (nodes == NULL) { goto done; } r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1), resource1->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); pe__show_node_weights(true, NULL, resource1->id, r1_nodes); r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2), resource2->id, NULL, NULL, 1, pe_weights_forward | pe_weights_init); pe__show_node_weights(true, NULL, resource2->id, r2_nodes); /* Current location score */ reason = "current location"; r1_weight = -INFINITY; r2_weight = -INFINITY; if (resource1->running_on) { r1_node = pe__current_node(resource1); r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id); if (r1_node != NULL) { r1_weight = r1_node->weight; } } if (resource2->running_on) { r2_node = pe__current_node(resource2); r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id); if (r2_node != NULL) { r2_weight = r2_node->weight; } } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } reason = "score"; for (gIter = nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; r1_node = NULL; r2_node = NULL; r1_weight = -INFINITY; if (r1_nodes) 
{ r1_node = g_hash_table_lookup(r1_nodes, node->details->id); } if (r1_node) { r1_weight = r1_node->weight; } r2_weight = -INFINITY; if (r2_nodes) { r2_node = g_hash_table_lookup(r2_nodes, node->details->id); } if (r2_node) { r2_weight = r2_node->weight; } if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } } done: crm_trace("%s (%d) on %s %c %s (%d) on %s: %s", resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a", rc < 0 ? '>' : rc > 0 ? '<' : '=', resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason); if (r1_nodes) { g_hash_table_destroy(r1_nodes); } if (r2_nodes) { g_hash_table_destroy(r2_nodes); } return rc; } static void allocate_resources(pe_working_set_t * data_set) { GListPtr gIter = NULL; if (is_set(data_set->flags, pe_flag_have_remote_nodes)) { /* Allocate remote connection resources first (which will also allocate * any colocation dependencies). If the connection is migrating, always * prefer the partial migration target. */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; if (rsc->is_remote_node == FALSE) { continue; } pe_rsc_trace(rsc, "Allocating remote connection resource '%s'", rsc->id); rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set); } } /* now do the rest of the resources */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; if (rsc->is_remote_node == TRUE) { continue; } pe_rsc_trace(rsc, "Allocating resource '%s'", rsc->id); rsc->cmds->allocate(rsc, NULL, data_set); } } /* We always use pe_order_preserve with these convenience functions to exempt * internally generated constraints from the prohibition of user constraints * involving remote connection resources. * * The start ordering additionally uses pe_order_runnable_left so that the * specified action is not runnable if the start is not runnable. */ static inline void order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action, enum pe_ordering extra, pe_working_set_t *data_set) { if (lh_rsc && rh_action && data_set) { custom_action_order(lh_rsc, start_key(lh_rsc), NULL, rh_action->rsc, NULL, rh_action, pe_order_preserve | pe_order_runnable_left | extra, data_set); } } static inline void order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc, enum pe_ordering extra, pe_working_set_t *data_set) { if (lh_action && rh_rsc && data_set) { custom_action_order(lh_action->rsc, NULL, lh_action, rh_rsc, stop_key(rh_rsc), NULL, pe_order_preserve | extra, data_set); } } // Clear fail counts for orphaned rsc on all online nodes static void cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; if (node->details->online && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { pe_action_t *clear_op = NULL; clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set); /* We can't use order_action_then_stop() here because its * pe_order_preserve breaks things */ custom_action_order(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc), NULL, pe_order_optional, data_set); } } } gboolean stage5(pe_working_set_t * data_set) { GListPtr gIter = NULL; int log_prio = show_utilization? 
LOG_STDOUT : utilization_log_level; if (safe_str_neq(data_set->placement_strategy, "default")) { GListPtr nodes = g_list_copy(data_set->nodes); nodes = sort_nodes_by_weight(nodes, NULL, data_set); data_set->resources = g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes); g_list_free(nodes); } gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; dump_node_capacity(log_prio, "Original", node); } crm_trace("Allocating services"); /* Take (next) highest resource, assign it and create its actions */ allocate_resources(data_set); gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; dump_node_capacity(log_prio, "Remaining", node); } // Process deferred action checks pe__foreach_param_check(data_set, check_params); pe__free_param_checks(data_set); if (is_set(data_set->flags, pe_flag_startup_probes)) { crm_trace("Calculating needed probes"); /* This code probably needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=100: With probes: ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints 36s ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph Without probes: ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph */ probe_resources(data_set); } crm_trace("Handle orphans"); if (is_set(data_set->flags, 
pe_flag_stop_rsc_orphans)) { for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; /* There's no need to recurse into rsc->children because those * should just be unallocated clone instances. */ if (is_set(rsc->flags, pe_rsc_orphan)) { cleanup_orphans(rsc, data_set); } } } crm_trace("Creating actions"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; rsc->cmds->create_actions(rsc, data_set); } crm_trace("Creating done"); return TRUE; } static gboolean is_managed(const pe_resource_t * rsc) { GListPtr gIter = rsc->children; if (is_set(rsc->flags, pe_rsc_managed)) { return TRUE; } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (is_managed(child_rsc)) { return TRUE; } } return FALSE; } static gboolean any_managed_resources(pe_working_set_t * data_set) { GListPtr gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; if (is_managed(rsc)) { return TRUE; } } return FALSE; } /*! * \internal * \brief Create pseudo-op for guest node fence, and order relative to it * * \param[in] node Guest node to fence * \param[in] data_set Working set of CIB state */ static void fence_guest(pe_node_t *node, pe_working_set_t *data_set) { pe_resource_t *container = node->details->remote_rsc->container; pe_action_t *stop = NULL; pe_action_t *stonith_op = NULL; /* The fence action is just a label; we don't do anything differently for * off vs. reboot. We specify it explicitly, rather than let it default to * cluster's default action, because we are not _initiating_ fencing -- we * are creating a pseudo-event to describe fencing that is already occurring * by other means (container recovery). */ const char *fence_action = "off"; /* Check whether guest's container resource has any explicit stop or * start (the stop may be implied by fencing of the guest's host). */ if (container) { stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL); if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) { fence_action = "reboot"; } } /* Create a fence pseudo-event, so we have an event to order actions * against, and the controller can always detect it. */ stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, data_set); update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable, __FUNCTION__, __LINE__); /* We want to imply stops/demotes after the guest is stopped, not wait until * it is restarted, so we always order pseudo-fencing after stop, not start * (even though start might be closer to what is done for a real reboot). */ if(stop && is_set(stop->flags, pe_action_pseudo)) { pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, data_set); crm_info("Implying guest node %s is down (action %d) after %s fencing", node->details->uname, stonith_op->id, stop->node->details->uname); order_actions(parent_stonith_op, stonith_op, pe_order_runnable_left|pe_order_implies_then); } else if (stop) { order_actions(stop, stonith_op, pe_order_runnable_left|pe_order_implies_then); crm_info("Implying guest node %s is down (action %d) " "after container %s is stopped (action %d)", node->details->uname, stonith_op->id, container->id, stop->id); } else { /* If we're fencing the guest node but there's no stop for the guest * resource, we must think the guest is already stopped. 
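sort_rsc_process_order() above is a g_list_sort_with_data() comparator that works through tiers: NULL handling first, then priority, then the current-location score, then the per-node scores from the merged weight tables, returning -1 or 1 as soon as one tier differs. The sketch below shows the same pattern in a self-contained form; the rsc_t struct and its two integer score fields are illustrative stand-ins (the node-score tiers are collapsed into one field), not Pacemaker types.

```c
/* Minimal, self-compiling sketch (not Pacemaker API): a tiered comparator in
 * the style of sort_rsc_process_order(), used with g_list_sort_with_data().
 */
#include <glib.h>
#include <stdio.h>

typedef struct {
    const char *id;
    int priority;       /* first tier: higher priority sorts earlier */
    int current_score;  /* second tier: higher score sorts earlier */
} rsc_t;

static gint
sort_by_priority_then_score(gconstpointer a, gconstpointer b, gpointer user_data)
{
    const rsc_t *r1 = a;
    const rsc_t *r2 = b;

    (void) user_data;

    /* NULL entries sort last, mirroring the NULL handling above */
    if (r1 == NULL && r2 == NULL) return 0;
    if (r1 == NULL) return 1;
    if (r2 == NULL) return -1;

    if (r1->priority != r2->priority) {
        return (r1->priority > r2->priority) ? -1 : 1;
    }
    if (r1->current_score != r2->current_score) {
        return (r1->current_score > r2->current_score) ? -1 : 1;
    }
    return 0;   /* full tie: keep relative order */
}

int
main(void)
{
    rsc_t web = { "web", 10, 50 }, db = { "db", 10, 90 }, ip = { "ip", 20, 0 };
    GList *rscs = NULL;

    rscs = g_list_prepend(rscs, &web);
    rscs = g_list_prepend(rscs, &db);
    rscs = g_list_prepend(rscs, &ip);

    rscs = g_list_sort_with_data(rscs, sort_by_priority_then_score, NULL);
    for (GList *iter = rscs; iter != NULL; iter = iter->next) {
        printf("%s\n", ((rsc_t *) iter->data)->id);  /* ip, db, web */
    }
    g_list_free(rscs);
    return 0;
}
```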
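any_managed_resources() decides whether fencing is worth scheduling at all by asking is_managed(), which treats a group or clone as managed if any descendant carries the managed flag. A minimal standalone version of that recursion, with an invented rsc_t holding a GList of children:

```c
/* Standalone sketch (illustrative types, not Pacemaker's): recurse through a
 * resource's children the way is_managed()/any_managed_resources() do.
 */
#include <glib.h>
#include <stdio.h>

typedef struct rsc_s {
    const char *id;
    gboolean managed;
    GList *children;    /* list of struct rsc_s * */
} rsc_t;

static gboolean
rsc_is_managed(const rsc_t *rsc)
{
    if (rsc->managed) {
        return TRUE;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (rsc_is_managed(iter->data)) {
            return TRUE;
        }
    }
    return FALSE;
}

int
main(void)
{
    rsc_t child1 = { "child1", FALSE, NULL };
    rsc_t child2 = { "child2", TRUE,  NULL };
    rsc_t group  = { "group",  FALSE, NULL };

    group.children = g_list_append(group.children, &child1);
    group.children = g_list_append(group.children, &child2);

    printf("group managed? %s\n", rsc_is_managed(&group) ? "yes" : "no"); /* yes */
    g_list_free(group.children);
    return 0;
}
```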
However, we may * think so because its resource history was just cleaned. To avoid * unnecessarily considering the guest node down if it's really up, * order the pseudo-fencing after any stop of the connection resource, * which will be ordered after any container (re-)probe. */ stop = find_first_action(node->details->remote_rsc->actions, NULL, RSC_STOP, NULL); if (stop) { order_actions(stop, stonith_op, pe_order_optional); crm_info("Implying guest node %s is down (action %d) " "after connection is stopped (action %d)", node->details->uname, stonith_op->id, stop->id); } else { /* Not sure why we're fencing, but everything must already be * cleanly stopped. */ crm_info("Implying guest node %s is down (action %d) ", node->details->uname, stonith_op->id); } } /* Order/imply other actions relative to pseudo-fence as with real fence */ pcmk__order_vs_fence(stonith_op, data_set); } /* * Create dependencies for stonith and shutdown operations */ gboolean stage6(pe_working_set_t * data_set) { pe_action_t *dc_down = NULL; pe_action_t *stonith_op = NULL; gboolean integrity_lost = FALSE; gboolean need_stonith = TRUE; GListPtr gIter; GListPtr stonith_ops = NULL; GList *shutdown_ops = NULL; /* Remote ordering constraints need to happen prior to calculating fencing * because it is one more place we will mark the node as dirty. * * A nice side effect of doing them early is that apply_*_ordering() can be * simpler because pe_fence_node() has already done some of the work. */ crm_trace("Creating remote ordering constraints"); apply_remote_node_ordering(data_set); crm_trace("Processing fencing and shutdown cases"); if (any_managed_resources(data_set) == FALSE) { crm_notice("Delaying fencing operations until there are resources to manage"); need_stonith = FALSE; } /* Check each node for stonith/shutdown */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; /* Guest nodes are "fenced" by recovering their container resource, * so handle them separately. */ if (pe__is_guest_node(node)) { if (node->details->remote_requires_reset && need_stonith && pe_can_fence(data_set, node)) { fence_guest(node, data_set); } continue; } stonith_op = NULL; if (node->details->unclean && need_stonith && pe_can_fence(data_set, node)) { stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set); pe_warn("Scheduling Node %s for STONITH", node->details->uname); pcmk__order_vs_fence(stonith_op, data_set); if (node->details->is_dc) { // Remember if the DC is being fenced dc_down = stonith_op; } else { if (is_not_set(data_set->flags, pe_flag_concurrent_fencing) && (stonith_ops != NULL)) { /* Concurrent fencing is disabled, so order each non-DC * fencing in a chain. If there is any DC fencing or * shutdown, it will be ordered after the last action in the * chain later. */ order_actions((pe_action_t *) stonith_ops->data, stonith_op, pe_order_optional); } // Remember all non-DC fencing actions in a separate list stonith_ops = g_list_prepend(stonith_ops, stonith_op); } } else if (node->details->online && node->details->shutdown && /* TODO define what a shutdown op means for a remote node. * For now we do not send shutdown operations for remote nodes, but * if we can come up with a good use for this in the future, we will. 
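When pe_flag_concurrent_fencing is off, stage6() serializes non-DC fencing: each new fencing action is ordered after the current head of the prepended stonith_ops list, and any DC fencing or shutdown is finally ordered after the head, i.e. the newest link in the chain. A standalone sketch of that chaining pattern, with order_actions() reduced to a print statement and plain strings standing in for pe_action_t:

```c
/* Standalone sketch of the serialized-fencing chain built in stage6() when
 * concurrent fencing is disabled. order_actions() here is a stand-in.
 */
#include <glib.h>
#include <stdio.h>

static void
order_actions(const char *first, const char *then)
{
    printf("order: %s -> %s\n", first, then);
}

int
main(void)
{
    const char *non_dc_fencing[] = { "fence-node1", "fence-node2", "fence-node3" };
    GList *stonith_ops = NULL;

    for (guint i = 0; i < G_N_ELEMENTS(non_dc_fencing); i++) {
        if (stonith_ops != NULL) {
            /* chain this fencing after the most recently added one */
            order_actions(stonith_ops->data, non_dc_fencing[i]);
        }
        stonith_ops = g_list_prepend(stonith_ops, (gpointer) non_dc_fencing[i]);
    }

    /* DC fencing (or shutdown) goes after the last link in the chain */
    order_actions(stonith_ops->data, "fence-dc");

    g_list_free(stonith_ops);
    return 0;
}
```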
*/ pe__is_guest_or_remote_node(node) == FALSE) { pe_action_t *down_op = sched_shutdown_op(node, data_set); if (node->details->is_dc) { // Remember if the DC is being shut down dc_down = down_op; } else { // Remember non-DC shutdowns for later ordering shutdown_ops = g_list_prepend(shutdown_ops, down_op); } } if (node->details->unclean && stonith_op == NULL) { integrity_lost = TRUE; pe_warn("Node %s is unclean!", node->details->uname); } } if (integrity_lost) { if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED"); pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE"); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) { crm_notice("Cannot fence unclean nodes until quorum is" " attained (or no-quorum-policy is set to ignore)"); } } if (dc_down != NULL) { /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated * DC elections. However, we don't want to order non-DC shutdowns before * a DC *fencing*, because even though we don't want a node that's * shutting down to become DC, the DC fencing could be ordered before a * clone stop that's also ordered before the shutdowns, thus leading to * a graph loop. */ if (safe_str_eq(dc_down->task, CRM_OP_SHUTDOWN)) { for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) { pe_action_t *node_stop = (pe_action_t *) gIter->data; crm_debug("Ordering shutdown on %s before %s on DC %s", node_stop->node->details->uname, dc_down->task, dc_down->node->details->uname); order_actions(node_stop, dc_down, pe_order_optional); } } // Order any non-DC fencing before any DC fencing or shutdown if (is_set(data_set->flags, pe_flag_concurrent_fencing)) { /* With concurrent fencing, order each non-DC fencing action * separately before any DC fencing or shutdown. */ for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) { order_actions((pe_action_t *) gIter->data, dc_down, pe_order_optional); } } else if (stonith_ops) { /* Without concurrent fencing, the non-DC fencing actions are * already ordered relative to each other, so we just need to order * the DC fencing after the last action in the chain (which is the * first item in the list). */ order_actions((pe_action_t *) stonith_ops->data, dc_down, pe_order_optional); } } g_list_free(stonith_ops); g_list_free(shutdown_ops); return TRUE; } /* * Determine the sets of independent actions and the correct order for the * actions in each set. 
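The ordering types used above (pe_order_optional, pe_order_runnable_left|pe_order_implies_then, and so on) are bits in enum pe_ordering: they are combined with |, tested with is_set(), and individual bits are dropped with clear_bit() further below (for example when a dangling stop is detected). A tiny standalone sketch of that flag handling; the demo_order_* names and values are made up, not Pacemaker's actual enum values.

```c
/* Tiny standalone sketch of bitmask-style ordering flags. */
#include <stdio.h>

enum demo_ordering {
    demo_order_none          = 0x0,
    demo_order_optional      = 0x1,
    demo_order_implies_then  = 0x2,
    demo_order_runnable_left = 0x4,
};

#define demo_is_set(word, bit)    (((word) & (bit)) != 0)
#define demo_set_bit(word, bit)   ((word) |= (bit))
#define demo_clear_bit(word, bit) ((word) &= ~(bit))

int
main(void)
{
    unsigned int type = demo_order_optional | demo_order_implies_then;

    if (demo_is_set(type, demo_order_implies_then)) {
        /* e.g. a dangling stop: drop the "implies then" requirement */
        demo_clear_bit(type, demo_order_implies_then);
    }
    demo_set_bit(type, demo_order_runnable_left);

    printf("type = 0x%x\n", type);  /* 0x5: optional | runnable_left */
    return 0;
}
```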
* * Mark dependencies of un-runnable actions un-runnable * */ static GListPtr find_actions_by_task(GListPtr actions, pe_resource_t * rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if (list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *task = NULL; guint interval_ms = 0; if (parse_op_key(original_key, NULL, &task, &interval_ms)) { key = pcmk__op_key(rsc->id, task, interval_ms); list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } free(key); free(task); } return list; } static void rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc, pe__ordering_t *order) { GListPtr gIter = NULL; GListPtr rh_actions = NULL; pe_action_t *rh_action = NULL; enum pe_ordering type; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); type = order->type; rh_action = order->rh_action; crm_trace("Processing RH of ordering constraint %d", order->id); if (rh_action != NULL) { rh_actions = g_list_prepend(NULL, rh_action); } else if (rsc != NULL) { rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task); } if (rh_actions == NULL) { pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..." " ignoring", rsc->id, order->rh_action_task); if (lh_action) { pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid); } return; } if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) { pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid, order->rh_action_task); clear_bit(type, pe_order_implies_then); } gIter = rh_actions; for (; gIter != NULL; gIter = gIter->next) { pe_action_t *rh_action_iter = (pe_action_t *) gIter->data; if (lh_action) { order_actions(lh_action, rh_action_iter, type); } else if (type & pe_order_implies_then) { update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__); crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type); } } g_list_free(rh_actions); } static void rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order, pe_working_set_t *data_set) { GListPtr gIter = NULL; GListPtr lh_actions = NULL; pe_action_t *lh_action = order->lh_action; pe_resource_t *rh_rsc = order->rh_rsc; crm_trace("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if (lh_action != NULL) { lh_actions = g_list_prepend(NULL, lh_action); } else { lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task); } if (lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *op_type = NULL; guint interval_ms = 0; parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms); key = pcmk__op_key(lh_rsc->id, op_type, interval_ms); if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) { free(key); pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); } else { pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); lh_action 
= custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); lh_actions = g_list_prepend(NULL, lh_action); } free(op_type); } gIter = lh_actions; for (; gIter != NULL; gIter = gIter->next) { pe_action_t *lh_action_iter = (pe_action_t *) gIter->data; if (rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if (rh_rsc) { rsc_order_then(lh_action_iter, rh_rsc, order); } else if (order->rh_action) { order_actions(lh_action_iter, order->rh_action, order->type); } } g_list_free(lh_actions); } extern void update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set); static int is_recurring_action(pe_action_t *action) { guint interval_ms; if (pcmk__guint_from_hash(action->meta, XML_LRM_ATTR_INTERVAL_MS, 0, &interval_ms) != pcmk_rc_ok) { return 0; } return (interval_ms > 0); } static void apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set) { /* VMs are also classified as containers for these purposes... in * that they both involve a 'thing' running on a real or remote * cluster node. * * This allows us to be smarter about the type and extent of * recovery actions required in various scenarios */ pe_resource_t *remote_rsc = NULL; pe_resource_t *container = NULL; enum action_tasks task = text2task(action->task); CRM_ASSERT(action->rsc); CRM_ASSERT(action->node); CRM_ASSERT(pe__is_guest_or_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); container = remote_rsc->container; CRM_ASSERT(container); if(is_set(container->flags, pe_rsc_failed)) { pe_fence_node(data_set, action->node, "container failed", FALSE); } crm_trace("Order %s action %s relative to %s%s for %s%s", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "", remote_rsc->id, is_set(container->flags, pe_rsc_failed)? "failed " : "", container->id); if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { /* Migration ops map to "no_action", but we need to apply the same * ordering as for stop or demote (see get_router_node()). */ task = stop_rsc; } switch (task) { case start_rsc: case action_promote: /* Force resource recovery if the container is recovered */ order_start_then_action(container, action, pe_order_implies_then, data_set); /* Wait for the connection resource to be up too */ order_start_then_action(remote_rsc, action, pe_order_none, data_set); break; case stop_rsc: case action_demote: if (is_set(container->flags, pe_rsc_failed)) { /* When the container representing a guest node fails, any stop * or demote actions for resources running on the guest node * are implied by the container stopping. This is similar to * how fencing operations work for cluster nodes and remote * nodes. */ } else { /* Ensure the operation happens before the connection is brought * down. * * If we really wanted to, we could order these after the * connection start, IFF the container's current role was * stopped (otherwise we re-introduce an ordering loop when the * connection is restarting). 
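find_actions_by_task() and rsc_order_first() above rebuild operation keys for a child resource: parse_op_key() splits a key into its task and interval, and pcmk__op_key() reassembles one from a resource ID, task, and interval in milliseconds. The helpers below are simplified stand-ins for those two functions (no validation, minimal error handling), only to show the <rsc>_<task>_<interval-ms> key shape the code works with.

```c
/* Simplified stand-ins for pcmk__op_key()/parse_op_key(), not the real ones. */
#include <glib.h>
#include <stdio.h>
#include <string.h>

static char *
demo_op_key(const char *rsc_id, const char *task, guint interval_ms)
{
    return g_strdup_printf("%s_%s_%u", rsc_id, task, interval_ms);
}

static gboolean
demo_parse_op_key(const char *key, char **rsc_id, char **task, guint *interval_ms)
{
    /* split from the right, since resource IDs may themselves contain '_' */
    const char *interval_sep = strrchr(key, '_');
    char *prefix = NULL;
    char *task_sep = NULL;

    if (interval_sep == NULL || interval_sep == key) {
        return FALSE;
    }
    prefix = g_strndup(key, (gsize) (interval_sep - key));
    task_sep = strrchr(prefix, '_');
    if (task_sep == NULL) {
        g_free(prefix);
        return FALSE;
    }
    if (rsc_id != NULL) {
        *rsc_id = g_strndup(prefix, (gsize) (task_sep - prefix));
    }
    if (task != NULL) {
        *task = g_strdup(task_sep + 1);
    }
    if (interval_ms != NULL) {
        *interval_ms = (guint) g_ascii_strtoull(interval_sep + 1, NULL, 10);
    }
    g_free(prefix);
    return TRUE;
}

int
main(void)
{
    char *key = demo_op_key("galera_db", "monitor", 10000);
    char *rsc = NULL, *task = NULL;
    guint interval_ms = 0;

    if (demo_parse_op_key(key, &rsc, &task, &interval_ms)) {
        printf("rsc=%s task=%s interval=%ums\n", rsc, task, interval_ms);
    }
    g_free(key);
    g_free(rsc);
    g_free(task);
    return 0;
}
```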
*/ order_action_then_stop(action, remote_rsc, pe_order_none, data_set); } break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ if(task != no_action) { order_start_then_action(remote_rsc, action, pe_order_implies_then, data_set); } } else { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } break; } } static enum remote_connection_state get_remote_node_state(pe_node_t *node) { pe_resource_t *remote_rsc = NULL; pe_node_t *cluster_node = NULL; CRM_ASSERT(node); remote_rsc = node->details->remote_rsc; CRM_ASSERT(remote_rsc); cluster_node = pe__current_node(remote_rsc); /* If the cluster node the remote connection resource resides on * is unclean or went offline, we can't process any operations * on that remote node until after it starts elsewhere. */ if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) { /* The connection resource is not going to run anywhere */ if (cluster_node && cluster_node->details->unclean) { /* The remote connection is failed because its resource is on a * failed node and can't be recovered elsewhere, so we must fence. */ return remote_state_failed; } if (is_not_set(remote_rsc->flags, pe_rsc_failed)) { /* Connection resource is cleanly stopped */ return remote_state_stopped; } /* Connection resource is failed */ if ((remote_rsc->next_role == RSC_ROLE_STOPPED) && remote_rsc->remote_reconnect_ms && node->details->remote_was_fenced && !pe__shutdown_requested(node)) { /* We won't know whether the connection is recoverable until the * reconnect interval expires and we reattempt connection. */ return remote_state_unknown; } /* The remote connection is in a failed state. If there are any * resources known to be active on it (stop) or in an unknown state * (probe), we must assume the worst and fence it. */ return remote_state_failed; } else if (cluster_node == NULL) { /* Connection is recoverable but not currently running anywhere, see if we can recover it first */ return remote_state_unknown; } else if(cluster_node->details->unclean == TRUE || cluster_node->details->online == FALSE) { /* Connection is running on a dead node, see if we can recover it first */ return remote_state_resting; } else if (pcmk__list_of_multiple(remote_rsc->running_on) && remote_rsc->partial_migration_source && remote_rsc->partial_migration_target) { /* We're in the middle of migrating a connection resource, * wait until after the resource migrates before performing * any actions. */ return remote_state_resting; } return remote_state_alive; } /*! * \internal * \brief Order actions on remote node relative to actions for the connection */ static void apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set) { pe_resource_t *remote_rsc = NULL; enum action_tasks task = text2task(action->task); enum remote_connection_state state = get_remote_node_state(action->node); enum pe_ordering order_opts = pe_order_none; if (action->rsc == NULL) { return; } CRM_ASSERT(action->node); CRM_ASSERT(pe__is_guest_or_remote_node(action->node)); remote_rsc = action->node->details->remote_rsc; CRM_ASSERT(remote_rsc); crm_trace("Order %s action %s relative to %s%s (state: %s)", action->task, action->uuid, is_set(remote_rsc->flags, pe_rsc_failed)? 
"failed " : "", remote_rsc->id, state2text(state)); if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE) || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) { /* Migration ops map to "no_action", but we need to apply the same * ordering as for stop or demote (see get_router_node()). */ task = stop_rsc; } switch (task) { case start_rsc: case action_promote: order_opts = pe_order_none; if (state == remote_state_failed) { /* Force recovery, by making this action required */ order_opts |= pe_order_implies_then; } /* Ensure connection is up before running this action */ order_start_then_action(remote_rsc, action, order_opts, data_set); break; case stop_rsc: if(state == remote_state_alive) { order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else if(state == remote_state_failed) { /* The resource is active on the node, but since we don't have a * valid connection, the only way to stop the resource is by * fencing the node. There is no need to order the stop relative * to the remote connection, since the stop will become implied * by the fencing. */ pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable", FALSE); } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) { /* State must be remote_state_unknown or remote_state_stopped. * Since the connection is not coming back up in this * transition, stop this resource first. */ order_action_then_stop(action, remote_rsc, pe_order_implies_first, data_set); } else { /* The connection is going to be started somewhere else, so * stop this resource after that completes. */ order_start_then_action(remote_rsc, action, pe_order_none, data_set); } break; case action_demote: /* Only order this demote relative to the connection start if the * connection isn't being torn down. Otherwise, the demote would be * blocked because the connection start would not be allowed. */ if(state == remote_state_resting || state == remote_state_unknown) { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } /* Otherwise we can rely on the stop ordering */ break; default: /* Wait for the connection resource to be up */ if (is_recurring_action(action)) { /* In case we ever get the recovery logic wrong, force * recurring monitors to be restarted, even if just * the connection was re-established */ order_start_then_action(remote_rsc, action, pe_order_implies_then, data_set); } else { pe_node_t *cluster_node = pe__current_node(remote_rsc); if(task == monitor_rsc && state == remote_state_failed) { /* We would only be here if we do not know the * state of the resource on the remote node. * Since we have no way to find out, it is * necessary to fence the node. */ pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable", FALSE); } if(cluster_node && state == remote_state_stopped) { /* The connection is currently up, but is going * down permanently. 
* * Make sure we check services are actually * stopped _before_ we let the connection get * closed */ order_action_then_stop(action, remote_rsc, pe_order_runnable_left, data_set); } else { order_start_then_action(remote_rsc, action, pe_order_none, data_set); } } break; } } static void apply_remote_node_ordering(pe_working_set_t *data_set) { if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) { return; } for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; pe_resource_t *remote = NULL; // We are only interested in resource actions if (action->rsc == NULL) { continue; } /* Special case: If we are clearing the failcount of an actual * remote connection resource, then make sure this happens before * any start of the resource in this transition. */ if (action->rsc->is_remote_node && safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { custom_action_order(action->rsc, NULL, action, action->rsc, pcmk__op_key(action->rsc->id, RSC_START, 0), NULL, pe_order_optional, data_set); continue; } // We are only interested in actions allocated to a node if (action->node == NULL) { continue; } if (!pe__is_guest_or_remote_node(action->node)) { continue; } /* We are only interested in real actions. * * @TODO This is probably wrong; pseudo-actions might be converted to * real actions and vice versa later in update_actions() at the end of * stage7(). */ if (is_set(action->flags, pe_action_pseudo)) { continue; } remote = action->node->details->remote_rsc; if (remote == NULL) { // Orphaned continue; } /* Another special case: if a resource is moving to a Pacemaker Remote * node, order the stop on the original node after any start of the * remote connection. This ensures that if the connection fails to * start, we leave the resource running on the original node. */ if (safe_str_eq(action->task, RSC_START)) { for (GList *item = action->rsc->actions; item != NULL; item = item->next) { pe_action_t *rsc_action = item->data; if ((rsc_action->node->details != action->node->details) && safe_str_eq(rsc_action->task, RSC_STOP)) { custom_action_order(remote, start_key(remote), NULL, action->rsc, NULL, rsc_action, pe_order_optional, data_set); } } } /* The action occurs across a remote connection, so create * ordering constraints that guarantee the action occurs while the node * is active (after start, before stop ... things like that). * * This is somewhat brittle in that we need to make sure the results of * this ordering are compatible with the result of get_router_node(). * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part * of this logic rather than action2xml(). */ if (remote->container) { crm_trace("Container ordering for %s", action->uuid); apply_container_ordering(action, data_set); } else { crm_trace("Remote ordering for %s", action->uuid); apply_remote_ordering(action, data_set); } } } static gboolean order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action) { /* No need to probe the resource on the node that is being * unfenced. Otherwise it might introduce transition loop * since probe will be performed after the node is * unfenced. 
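get_remote_node_state() above reduces a handful of facts about the connection resource (whether it will run anywhere this transition, whether it has failed, and the health of its current host) to one remote_state_* value, which apply_remote_ordering() then switches on. The sketch below compresses that decision tree — it omits the reconnect-interval and partial-migration branches — and uses its own enum and parameters purely to illustrate the shape of the decision.

```c
/* Standalone sketch of the decision shape in get_remote_node_state();
 * names and parameters are illustrative, not the real implementation.
 */
#include <glib.h>
#include <stdio.h>

enum demo_remote_state {
    demo_remote_stopped,
    demo_remote_failed,
    demo_remote_unknown,
    demo_remote_resting,
    demo_remote_alive,
};

static enum demo_remote_state
demo_remote_state(gboolean will_run_somewhere, gboolean conn_failed,
                  gboolean host_known, gboolean host_usable)
{
    if (!will_run_somewhere) {
        /* the connection is not coming up anywhere in this transition */
        return conn_failed ? demo_remote_failed : demo_remote_stopped;
    }
    if (!host_known) {
        /* recoverable, but we do not yet know where it will run */
        return demo_remote_unknown;
    }
    if (!host_usable) {
        /* its current host is dead; wait until it recovers elsewhere */
        return demo_remote_resting;
    }
    return demo_remote_alive;
}

int
main(void)
{
    printf("%d\n", demo_remote_state(TRUE,  FALSE, TRUE,  TRUE));   /* alive */
    printf("%d\n", demo_remote_state(FALSE, TRUE,  TRUE,  TRUE));   /* failed */
    printf("%d\n", demo_remote_state(TRUE,  FALSE, TRUE,  FALSE));  /* resting */
    return 0;
}
```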
*/ if (safe_str_eq(rh_action->task, CRM_OP_FENCE) && probe->node && rh_action->node && probe->node->details == rh_action->node->details) { const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action"); if (safe_str_eq(op, "on")) { return TRUE; } } // Shutdown waits for probe to complete only if it's on the same node if ((safe_str_eq(rh_action->task, CRM_OP_SHUTDOWN)) && probe->node && rh_action->node && probe->node->details != rh_action->node->details) { return TRUE; } return FALSE; } static void order_first_probes_imply_stops(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) { pe__ordering_t *order = gIter->data; enum pe_ordering order_type = pe_order_optional; pe_resource_t *lh_rsc = order->lh_rsc; pe_resource_t *rh_rsc = order->rh_rsc; pe_action_t *lh_action = order->lh_action; pe_action_t *rh_action = order->rh_action; const char *lh_action_task = order->lh_action_task; const char *rh_action_task = order->rh_action_task; GListPtr probes = NULL; GListPtr rh_actions = NULL; GListPtr pIter = NULL; if (lh_rsc == NULL) { continue; } else if (rh_rsc && lh_rsc == rh_rsc) { continue; } if (lh_action == NULL && lh_action_task == NULL) { continue; } if (rh_action == NULL && rh_action_task == NULL) { continue; } /* Technically probe is expected to return "not running", which could be * the alternative of stop action if the status of the resource is * unknown yet. */ if (lh_action && safe_str_neq(lh_action->task, RSC_STOP)) { continue; } else if (lh_action == NULL && lh_action_task && !pcmk__ends_with(lh_action_task, "_" RSC_STOP "_0")) { continue; } /* Do not probe the resource inside of a stopping container. Otherwise * it might introduce transition loop since probe will be performed * after the container starts again. */ if (rh_rsc && lh_rsc->container == rh_rsc) { if (rh_action && safe_str_eq(rh_action->task, RSC_STOP)) { continue; } else if (rh_action == NULL && rh_action_task && pcmk__ends_with(rh_action_task,"_" RSC_STOP "_0")) { continue; } } if (order->type == pe_order_none) { continue; } // Preserve the order options for future filtering if (is_set(order->type, pe_order_apply_first_non_migratable)) { set_bit(order_type, pe_order_apply_first_non_migratable); } if (is_set(order->type, pe_order_same_node)) { set_bit(order_type, pe_order_same_node); } // Keep the order types for future filtering if (order->type == pe_order_anti_colocation || order->type == pe_order_load) { order_type = order->type; } probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE); if (probes == NULL) { continue; } if (rh_action) { rh_actions = g_list_prepend(rh_actions, rh_action); } else if (rh_rsc && rh_action_task) { rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL); } if (rh_actions == NULL) { g_list_free(probes); continue; } crm_trace("Processing for LH probe based on ordering constraint %s -> %s" " (id=%d, type=%.6x)", lh_action ? lh_action->uuid : lh_action_task, rh_action ? 
rh_action->uuid : rh_action_task, order->id, order->type); for (pIter = probes; pIter != NULL; pIter = pIter->next) { pe_action_t *probe = (pe_action_t *) pIter->data; GListPtr rIter = NULL; for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) { pe_action_t *rh_action_iter = (pe_action_t *) rIter->data; if (order_first_probe_unneeded(probe, rh_action_iter)) { continue; } order_actions(probe, rh_action_iter, order_type); } } g_list_free(rh_actions); g_list_free(probes); } } static void order_first_probe_then_restart_repromote(pe_action_t * probe, pe_action_t * after, pe_working_set_t * data_set) { GListPtr gIter = NULL; bool interleave = FALSE; pe_resource_t *compatible_rsc = NULL; if (probe == NULL || probe->rsc == NULL || probe->rsc->variant != pe_native) { return; } if (after == NULL // Avoid running into any possible loop || is_set(after->flags, pe_action_tracking)) { return; } if (safe_str_neq(probe->task, RSC_STATUS)) { return; } pe_set_action_bit(after, pe_action_tracking); crm_trace("Processing based on %s %s -> %s %s", probe->uuid, probe->node ? probe->node->details->uname: "", after->uuid, after->node ? after->node->details->uname : ""); if (after->rsc /* Better not build a dependency directly with a clone/group. * We are going to proceed through the ordering chain and build * dependencies with its children. */ && after->rsc->variant == pe_native && probe->rsc != after->rsc) { GListPtr then_actions = NULL; enum pe_ordering probe_order_type = pe_order_optional; if (safe_str_eq(after->task, RSC_START)) { then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE); } else if (safe_str_eq(after->task, RSC_PROMOTE)) { then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE); } for (gIter = then_actions; gIter != NULL; gIter = gIter->next) { pe_action_t *then = (pe_action_t *) gIter->data; // Skip any pseudo action which for example is implied by fencing if (is_set(then->flags, pe_action_pseudo)) { continue; } order_actions(probe, then, probe_order_type); } g_list_free(then_actions); } if (after->rsc && after->rsc->variant > pe_group) { const char *interleave_s = g_hash_table_lookup(after->rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); if (interleave) { /* For an interleaved clone, we should build a dependency only * with the relevant clone child. */ compatible_rsc = find_compatible_child(probe->rsc, after->rsc, RSC_ROLE_UNKNOWN, FALSE, data_set); } } for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) { pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data; /* pe_order_implies_then is the reason why a required A.start * implies/enforces B.start to be required too, which is the cause of * B.restart/re-promote. * * Not sure about pe_order_implies_then_on_node though. It's now only * used for unfencing case, which tends to introduce transition * loops... */ if (is_not_set(after_wrapper->type, pe_order_implies_then)) { /* The order type between a group/clone and its child such as * B.start-> B_child.start is: * pe_order_implies_first_printed | pe_order_runnable_left * * Proceed through the ordering chain and build dependencies with * its children. */ if (after->rsc == NULL || after->rsc->variant < pe_group || probe->rsc->parent == after->rsc || after_wrapper->action->rsc == NULL || after_wrapper->action->rsc->variant > pe_group || after->rsc != after_wrapper->action->rsc->parent) { continue; } /* Proceed to the children of a group or a non-interleaved clone. 
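order_first_probe_then_restart_repromote() walks the actions_after graph recursively and marks every visited action with pe_action_tracking so that cycles in the ordering graph cannot cause unbounded recursion; clear_actions_tracking_flag() then resets the marks before the next probe is processed. A self-contained version of that visit-and-clear pattern, using an invented demo_action_t in place of pe_action_t:

```c
/* Standalone sketch of the tracking-flag traversal; types are illustrative. */
#include <glib.h>
#include <stdio.h>

typedef struct demo_action_s {
    const char *uuid;
    gboolean tracking;
    GList *actions_after;   /* list of struct demo_action_s * */
} demo_action_t;

static void
walk_after(demo_action_t *action)
{
    if (action->tracking) {
        return;             /* already visited: avoid loops */
    }
    action->tracking = TRUE;
    printf("visiting %s\n", action->uuid);

    for (GList *iter = action->actions_after; iter != NULL; iter = iter->next) {
        walk_after(iter->data);
    }
}

static void
clear_tracking(GList *all_actions)
{
    for (GList *iter = all_actions; iter != NULL; iter = iter->next) {
        ((demo_action_t *) iter->data)->tracking = FALSE;
    }
}

int
main(void)
{
    demo_action_t probe = { "rsc1_monitor_0", FALSE, NULL };
    demo_action_t start = { "rsc2_start_0",   FALSE, NULL };
    demo_action_t stop  = { "rsc2_stop_0",    FALSE, NULL };
    GList *all = NULL;

    /* a small graph with a cycle: probe -> start -> stop -> start */
    probe.actions_after = g_list_append(probe.actions_after, &start);
    start.actions_after = g_list_append(start.actions_after, &stop);
    stop.actions_after  = g_list_append(stop.actions_after, &start);

    walk_after(&probe);     /* visits each action once despite the cycle */

    all = g_list_append(all, &probe);
    all = g_list_append(all, &start);
    all = g_list_append(all, &stop);
    clear_tracking(all);    /* reset before the next probe is processed */

    g_list_free(all);
    g_list_free(probe.actions_after);
    g_list_free(start.actions_after);
    g_list_free(stop.actions_after);
    return 0;
}
```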
* For an interleaved clone, proceed only to the relevant child. */ if (after->rsc->variant > pe_group && interleave == TRUE && (compatible_rsc == NULL || compatible_rsc != after_wrapper->action->rsc)) { continue; } } crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)", after->uuid, after->node ? after->node->details->uname: "", after_wrapper->action->uuid, after_wrapper->action->node ? after_wrapper->action->node->details->uname : "", after_wrapper->type); order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set); } } static void clear_actions_tracking_flag(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (is_set(action->flags, pe_action_tracking)) { pe_clear_action_bit(action, pe_action_tracking); } } } static void order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set) { GListPtr gIter = NULL; GListPtr probes = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t * child = (pe_resource_t *) gIter->data; order_first_rsc_probes(child, data_set); } if (rsc->variant != pe_native) { return; } probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE); for (gIter = probes; gIter != NULL; gIter= gIter->next) { pe_action_t *probe = (pe_action_t *) gIter->data; GListPtr aIter = NULL; for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) { pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data; order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set); clear_actions_tracking_flag(data_set); } } g_list_free(probes); } static void order_first_probes(pe_working_set_t * data_set) { GListPtr gIter = NULL; for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; order_first_rsc_probes(rsc, data_set); } order_first_probes_imply_stops(data_set); } static void order_then_probes(pe_working_set_t * data_set) { #if 0 GListPtr gIter = NULL; for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; /* Given "A then B", we would prefer to wait for A to be * started before probing B. * * If A was a filesystem on which the binaries and data for B * lived, it would have been useful if the author of B's agent * could assume that A is running before B.monitor will be * called. * * However we can't _only_ probe once A is running, otherwise * we'd not detect the state of B if A could not be started * for some reason. * * In practice however, we cannot even do an opportunistic * version of this because B may be moving: * * B.probe -> B.start * B.probe -> B.stop * B.stop -> B.start * A.stop -> A.start * A.start -> B.probe * * So far so good, but if we add the result of this code: * * B.stop -> A.stop * * Then we get a loop: * * B.probe -> B.stop -> A.stop -> A.start -> B.probe * * We could kill the 'B.probe -> B.stop' dependency, but that * could mean stopping B "too" soon, because B.start must wait * for the probes to complete. * * Another option is to allow it only if A is a non-unique * clone with clone-max == node-max (since we'll never be * moving it). However, we could still be stopping one * instance at the same time as starting another. * The complexity of checking for allowed conditions combined * with the ever narrowing usecase suggests that this code * should remain disabled until someone gets smarter. 
*/ pe_action_t *start = NULL; GListPtr actions = NULL; GListPtr probes = NULL; actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE); if (actions) { start = actions->data; g_list_free(actions); } if(start == NULL) { crm_err("No start action for %s", rsc->id); continue; } probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE); for (actions = start->actions_before; actions != NULL; actions = actions->next) { pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data; GListPtr pIter = NULL; pe_action_t *first = before->action; pe_resource_t *first_rsc = first->rsc; if(first->required_runnable_before) { GListPtr clone_actions = NULL; for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) { before = (pe_action_wrapper_t *) clone_actions->data; crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid); CRM_ASSERT(before->action->rsc); first_rsc = before->action->rsc; break; } } else if(safe_str_neq(first->task, RSC_START)) { crm_trace("Not a start op %s for %s", first->uuid, start->uuid); } if(first_rsc == NULL) { continue; } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) { crm_trace("Same parent %s for %s", first_rsc->id, start->uuid); continue; } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) { crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid); continue; } crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant); for (pIter = probes; pIter != NULL; pIter = pIter->next) { pe_action_t *probe = (pe_action_t *) pIter->data; crm_err("Ordering %s before %s", first->uuid, probe->uuid); order_actions(first, probe, pe_order_optional); } } } #endif } static void order_probes(pe_working_set_t * data_set) { order_first_probes(data_set); order_then_probes(data_set); } gboolean stage7(pe_working_set_t * data_set) { GList *gIter = NULL; crm_trace("Applying ordering constraints"); /* Don't ask me why, but apparently they need to be processed in * the order they were created in... 
go figure * * Also g_list_append() has horrendous performance characteristics * So we need to use g_list_prepend() and then reverse the list here */ data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints); for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) { pe__ordering_t *order = gIter->data; pe_resource_t *rsc = order->lh_rsc; crm_trace("Applying ordering constraint: %d", order->id); if (rsc != NULL) { crm_trace("rsc_action-to-*"); rsc_order_first(rsc, order, data_set); continue; } rsc = order->rh_rsc; if (rsc != NULL) { crm_trace("action-to-rsc_action"); rsc_order_then(order->lh_action, rsc, order); } else { crm_trace("action-to-action"); order_actions(order->lh_action, order->rh_action, order->type); } } for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; update_colo_start_chain(action, data_set); } crm_trace("Ordering probes"); order_probes(data_set); crm_trace("Updating %d actions", g_list_length(data_set->actions)); for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; update_action(action, data_set); } // Check for invalid orderings for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; pe_action_wrapper_t *input = NULL; for (GList *input_iter = action->actions_before; input_iter != NULL; input_iter = input_iter->next) { input = (pe_action_wrapper_t *) input_iter->data; if (pcmk__ordering_is_invalid(action, input)) { input->type = pe_order_none; } } } LogNodeActions(data_set, FALSE); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; LogActions(rsc, data_set, FALSE); } return TRUE; } static int transition_id = -1; /*! 
* \internal * \brief Log a message after calculating a transition * * \param[in] filename Where transition input is stored */ void pcmk__log_transition_summary(const char *filename) { if (was_processing_error) { crm_err("Calculated transition %d (with errors), saving inputs in %s", transition_id, filename); } else if (was_processing_warning) { crm_warn("Calculated transition %d (with warnings), saving inputs in %s", transition_id, filename); } else { crm_notice("Calculated transition %d, saving inputs in %s", transition_id, filename); } if (crm_config_error) { crm_notice("Configuration errors found during scheduler processing," " please run \"crm_verify -L\" to identify issues"); } } /* * Create a dependency graph to send to the transitioner (via the controller) */ gboolean stage8(pe_working_set_t * data_set) { GListPtr gIter = NULL; const char *value = NULL; transition_id++; crm_trace("Creating transition graph %d.", transition_id); data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH); value = pe_pref(data_set->config_hash, "cluster-delay"); crm_xml_add(data_set->graph, "cluster-delay", value); value = pe_pref(data_set->config_hash, "stonith-timeout"); crm_xml_add(data_set->graph, "stonith-timeout", value); crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY"); if (is_set(data_set->flags, pe_flag_start_failure_fatal)) { crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY"); } else { crm_xml_add(data_set->graph, "failed-start-offset", "1"); } value = pe_pref(data_set->config_hash, "batch-limit"); crm_xml_add(data_set->graph, "batch-limit", value); crm_xml_add_int(data_set->graph, "transition_id", transition_id); value = pe_pref(data_set->config_hash, "migration-limit"); if (crm_parse_ll(value, NULL) > 0) { crm_xml_add(data_set->graph, "migration-limit", value); } if (data_set->recheck_by > 0) { char *recheck_epoch = NULL; recheck_epoch = crm_strdup_printf("%llu", (long long) data_set->recheck_by); crm_xml_add(data_set->graph, "recheck-by", recheck_epoch); free(recheck_epoch); } /* errors... slist_iter(action, pe_action_t, action_list, lpc, if(action->optional == FALSE && action->runnable == FALSE) { print_action("Ignoring", action, TRUE); } ); */ /* The following code will de-duplicate action inputs, so nothing past this * should rely on the action input type flags retaining their original * values. 
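The "horrendous performance" note above is why stage7() reverses data_set->ordering_constraints before applying them: g_list_append() walks the whole list on every call, so constraints are prepended in O(1) as they are created and a single g_list_reverse() restores creation order just before they are consumed. In isolation the idiom looks like this:

```c
/* Standalone sketch of the prepend-then-reverse idiom used in stage7(). */
#include <glib.h>
#include <stdio.h>

int
main(void)
{
    GList *constraints = NULL;

    /* built up as constraints are created: O(1) per insertion */
    for (int id = 1; id <= 5; id++) {
        constraints = g_list_prepend(constraints, GINT_TO_POINTER(id));
    }

    /* one O(n) reversal restores creation order before processing */
    constraints = g_list_reverse(constraints);

    for (GList *iter = constraints; iter != NULL; iter = iter->next) {
        printf("applying ordering constraint %d\n", GPOINTER_TO_INT(iter->data));
    }
    g_list_free(constraints);
    return 0;
}
```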
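stage8() assembles the transition graph as an XML element whose attributes carry scheduler options (cluster-delay, stonith-timeout, failed-stop-offset, failed-start-offset, batch-limit, transition_id, and so on). A standalone libxml2 sketch of the same kind of assembly; the attribute names come from the crm_xml_add() calls above, while "transition_graph" is used here only as a stand-in for the XML_TAG_GRAPH constant and the values are arbitrary.

```c
/* Standalone libxml2 sketch; build with:
 *   gcc demo.c $(pkg-config --cflags --libs libxml-2.0)
 */
#include <libxml/tree.h>
#include <stdio.h>

int
main(void)
{
    xmlDocPtr doc = xmlNewDoc(BAD_CAST "1.0");
    xmlNodePtr graph = xmlNewNode(NULL, BAD_CAST "transition_graph");
    xmlChar *dump = NULL;
    int len = 0;

    xmlDocSetRootElement(doc, graph);
    xmlSetProp(graph, BAD_CAST "cluster-delay", BAD_CAST "60s");
    xmlSetProp(graph, BAD_CAST "stonith-timeout", BAD_CAST "60s");
    xmlSetProp(graph, BAD_CAST "failed-stop-offset", BAD_CAST "INFINITY");
    xmlSetProp(graph, BAD_CAST "failed-start-offset", BAD_CAST "INFINITY");
    xmlSetProp(graph, BAD_CAST "batch-limit", BAD_CAST "0");
    xmlSetProp(graph, BAD_CAST "transition_id", BAD_CAST "1");

    xmlDocDumpFormatMemory(doc, &dump, &len, 1);
    printf("%s", (const char *) dump);

    xmlFree(dump);
    xmlFreeDoc(doc);
    xmlCleanupParser();
    return 0;
}
```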
*/ gIter = data_set->resources; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id); rsc->cmds->expand(rsc, data_set); } crm_log_xml_trace(data_set->graph, "created resource-driven action list"); /* pseudo action to distribute list of nodes with maintenance state update */ add_maintenance_update(data_set); /* catch any non-resource specific actions */ crm_trace("processing non-resource actions"); gIter = data_set->actions; for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (action->rsc && action->node && action->node->details->shutdown && is_not_set(action->rsc->flags, pe_rsc_maintenance) && is_not_set(action->flags, pe_action_optional) && is_not_set(action->flags, pe_action_runnable) && crm_str_eq(action->task, RSC_STOP, TRUE) ) { /* Eventually we should just ignore the 'fence' case * But for now it's the best way to detect (in CTS) when * CIB resource updates are being lost */ if (is_set(data_set->flags, pe_flag_have_quorum) || data_set->no_quorum_policy == no_quorum_ignore) { crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)", action->node->details->unclean ? "fence" : "shut down", action->node->details->uname, action->rsc->id, is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked", is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "", action->uuid); } } graph_element_from_action(action, data_set); } crm_log_xml_trace(data_set->graph, "created generic action list"); crm_trace("Created transition graph %d.", transition_id); return TRUE; } void LogNodeActions(pe_working_set_t * data_set, gboolean terminal) { GListPtr gIter = NULL; for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { char *node_name = NULL; char *task = NULL; pe_action_t *action = (pe_action_t *) gIter->data; if (action->rsc != NULL) { continue; } else if (is_set(action->flags, pe_action_optional)) { continue; } if (pe__is_guest_node(action->node)) { node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id); } else if(action->node) { node_name = crm_strdup_printf("%s", action->node->details->uname); } if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { task = strdup("Shutdown"); } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { const char *op = g_hash_table_lookup(action->meta, "stonith_action"); task = crm_strdup_printf("Fence (%s)", op); } if(task == NULL) { /* Nothing to report */ } else if(terminal && action->reason) { printf(" * %s %s '%s'\n", task, node_name, action->reason); } else if(terminal) { printf(" * %s %s\n", task, node_name); } else if(action->reason) { crm_notice(" * %s %s '%s'\n", task, node_name, action->reason); } else { crm_notice(" * %s %s\n", task, node_name); } free(node_name); free(task); } } diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index fee6b9938c..aa51f9e02d 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,3873 +1,3873 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
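LogNodeActions() above formats its per-node summary lines by first decorating guest node names with their container ("node (resource: container)") and then attaching the task label and optional reason. The sketch below reproduces that string assembly with plain GLib; g_strdup_printf() stands in for crm_strdup_printf(), and the function and parameter names are illustrative.

```c
/* Standalone sketch of the string assembly in LogNodeActions(). */
#include <glib.h>
#include <stdio.h>

static char *
describe_node_action(const char *task, const char *node, const char *container,
                     const char *reason)
{
    char *node_name = (container != NULL)
                      ? g_strdup_printf("%s (resource: %s)", node, container)
                      : g_strdup(node);
    char *line = (reason != NULL)
                 ? g_strdup_printf(" * %s %s '%s'", task, node_name, reason)
                 : g_strdup_printf(" * %s %s", task, node_name);

    g_free(node_name);
    return line;
}

int
main(void)
{
    char *line = describe_node_action("Fence (reboot)", "guest1",
                                      "guest1-docker", "guest is unclean");

    /* " * Fence (reboot) guest1 (resource: guest1-docker) 'guest is unclean'" */
    printf("%s\n", line);
    g_free(line);
    return 0;
}
```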
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); #define set_config_flag(data_set, option, flag) do { \ const char *tmp = pe_pref(data_set->config_hash, option); \ if(tmp) { \ if(crm_is_true(tmp)) { \ set_bit(data_set->flags, flag); \ } else { \ clear_bit(data_set->flags, flag); \ } \ } \ } while(0) static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *failed, pe_working_set_t *data_set); -static gboolean determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node); +static void determine_remote_online_status(pe_working_set_t *data_set, + pe_node_t *this_node); static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite, pe_working_set_t *data_set); +static void determine_online_status(xmlNode *node_state, pe_node_t *this_node, + pe_working_set_t *data_set); // Bitmask for warnings we only want to print once uint32_t pe_wo = 0; static gboolean is_dangling_guest_node(pe_node_t *node) { /* we are looking for a remote-node that was supposed to be mapped to a * container resource, but all traces of that container have disappeared * from both the config and the status section. */ if (pe__is_guest_or_remote_node(node) && node->details->remote_rsc && node->details->remote_rsc->container == NULL && is_set(node->details->remote_rsc->flags, pe_rsc_orphan_container_filler)) { return TRUE; } return FALSE; } /*! * \brief Schedule a fence action for a node * * \param[in,out] data_set Current working set of cluster * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed * \param[in] priority_delay Whether to consider `priority-fencing-delay` */ void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay) { CRM_CHECK(node, return); /* A guest node is fenced by marking its container as failed */ if (pe__is_guest_node(node)) { pe_resource_t *rsc = node->details->remote_rsc->container; if (is_set(rsc->flags, pe_rsc_failed) == FALSE) { if (!is_set(rsc->flags, pe_rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", node->details->uname, reason, rsc->id); } else { crm_warn("Guest node %s will be fenced " "(by recovering its guest resource %s): %s", node->details->uname, rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. */ node->details->remote_requires_reset = TRUE; set_bit(rsc->flags, pe_rsc_failed); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", node->details->uname, reason); set_bit(node->details->remote_rsc->flags, pe_rsc_failed); } else if (pe__is_remote_node(node)) { pe_resource_t *rsc = node->details->remote_rsc; if (rsc && (!is_set(rsc->flags, pe_rsc_managed))) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", node->details->uname, reason); } else if(node->details->remote_requires_reset == FALSE) { node->details->remote_requires_reset = TRUE; crm_warn("Remote node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? 
"will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; // No need to apply `priority-fencing-delay` for remote nodes pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", node->details->uname, pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean", reason); } else { crm_warn("Cluster node %s %s: %s", node->details->uname, pe_can_fence(data_set, node)? "will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \ "[(@" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_PROVIDES "'" \ "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \ "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \ "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(unsigned long long flag, const char *xpath, pe_working_set_t *data_set) { xmlXPathObjectPtr result = NULL; if (is_not_set(data_set->flags, flag)) { result = xpath_search(data_set->input, xpath); if (result && (numXpathResults(result) > 0)) { set_bit(data_set->flags, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set) { const char *value = NULL; GHashTable *config_hash = crm_str_table_new(); data_set->config_hash = config_hash; pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set); verify_pe_options(data_set->config_hash); set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes); if(is_not_set(data_set->flags, pe_flag_startup_probes)) { crm_info("Startup probes: disabled (dangerous)"); } value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_notice("Watchdog will be used via SBD if fencing is required " "and stonith-watchdog-timeout is nonzero"); set_bit(data_set->flags, pe_flag_have_stonith_resource); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. */ set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set); value = pe_pref(data_set->config_hash, "stonith-timeout"); data_set->stonith_timeout = (int) crm_parse_interval_spec(value); crm_debug("STONITH timeout: %d", data_set->stonith_timeout); set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled); crm_debug("STONITH of failed nodes is %s", is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled"); data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action"); if (!strcmp(data_set->stonith_action, "poweroff")) { pe_warn_once(pe_wo_poweroff, "Support for stonith-action of 'poweroff' is deprecated " "and will be removed in a future release (use 'off' instead)"); data_set->stonith_action = "off"; } crm_trace("STONITH will %s nodes", data_set->stonith_action); set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing); crm_debug("Concurrent fencing is %s", is_set(data_set->flags, pe_flag_concurrent_fencing) ? 
"enabled" : "disabled"); // Default value -1 means `priority-fencing-delay` is disabled data_set->priority_fencing_delay = -1; value = pe_pref(data_set->config_hash, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY); if (value) { data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000; crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay); } set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything); crm_debug("Stop all active resources: %s", is_set(data_set->flags, pe_flag_stop_everything) ? "true" : "false"); set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster); if (is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(data_set->config_hash, "no-quorum-policy"); if (safe_str_eq(value, "ignore")) { data_set->no_quorum_policy = no_quorum_ignore; } else if (safe_str_eq(value, "freeze")) { data_set->no_quorum_policy = no_quorum_freeze; } else if (safe_str_eq(value, "suicide")) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { int do_panic = 0; crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic); if (do_panic || is_set(data_set->flags, pe_flag_have_quorum)) { data_set->no_quorum_policy = no_quorum_suicide; } else { crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum"); data_set->no_quorum_policy = no_quorum_stop; } } else { pcmk__config_err("Resetting no-quorum-policy to 'stop' because " "fencing is disabled"); data_set->no_quorum_policy = no_quorum_stop; } } else { data_set->no_quorum_policy = no_quorum_stop; } switch (data_set->no_quorum_policy) { case no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case no_quorum_suicide: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case no_quorum_ignore: crm_notice("On loss of quorum: Ignore"); break; } set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans); crm_trace("Orphan resources are %s", is_set(data_set->flags, pe_flag_stop_rsc_orphans) ? "stopped" : "ignored"); set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans); crm_trace("Orphan resource actions are %s", is_set(data_set->flags, pe_flag_stop_action_orphans) ? "stopped" : "ignored"); set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop); crm_trace("Stopped resources are removed from the status section: %s", is_set(data_set->flags, pe_flag_remove_after_stop) ? "true" : "false"); set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode); crm_trace("Maintenance mode: %s", is_set(data_set->flags, pe_flag_maintenance_mode) ? "true" : "false"); set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal); crm_trace("Start failures are %s", is_set(data_set->flags, pe_flag_start_failure_fatal) ? 
"always fatal" : "handled by failcount"); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing); } if (is_set(data_set->flags, pe_flag_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes"); } - node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); - node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); - node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); + pcmk__score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); + pcmk__score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); + pcmk__score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s", pe_pref(data_set->config_hash, "node-health-red"), pe_pref(data_set->config_hash, "node-health-yellow"), pe_pref(data_set->config_hash, "node-health-green")); data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); crm_trace("Placement strategy: %s", data_set->placement_strategy); set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock); crm_trace("Resources will%s be locked to cleanly shut down nodes", (is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not")); if (is_set(data_set->flags, pe_flag_shutdown_lock)) { value = pe_pref(data_set->config_hash, XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT); data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000; crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock); } return TRUE; } static void destroy_digest_cache(gpointer ptr) { op_digest_cache_t *data = ptr; free_xml(data->params_all); free_xml(data->params_secure); free_xml(data->params_restart); free(data->digest_all_calc); free(data->digest_restart_calc); free(data->digest_secure_calc); free(data); } pe_node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set) { pe_node_t *new_node = NULL; if (pe_find_node(data_set->nodes, uname) != NULL) { pcmk__config_warn("More than one node entry has name '%s'", uname); } new_node = calloc(1, sizeof(pe_node_t)); if (new_node == NULL) { return NULL; } new_node->weight = char2score(score); new_node->fixed = FALSE; new_node->details = calloc(1, sizeof(struct pe_node_shared_s)); if (new_node->details == NULL) { free(new_node); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->rsc_discovery_enabled = TRUE; new_node->details->running_rsc = NULL; new_node->details->type = node_ping; if (safe_str_eq(type, "remote")) { new_node->details->type = node_remote; set_bit(data_set->flags, pe_flag_have_remote_nodes); } else if ((type == NULL) || safe_str_eq(type, "member")) { new_node->details->type = node_member; } new_node->details->attrs = crm_str_table_new(); if (pe__is_guest_or_remote_node(new_node)) { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("remote")); } else { g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("cluster")); } new_node->details->utilization = crm_str_table_new(); new_node->details->digest_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_digest_cache); 
data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname); return new_node; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = ID(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; for (attr_set = __xml_first_child_element(xml_obj); attr_set != NULL; attr_set = __xml_next_element(attr_set)) { if (safe_str_neq((const char *)attr_set->name, XML_TAG_META_SETS)) { continue; } for (attr = __xml_first_child_element(attr_set); attr != NULL; attr = __xml_next_element(attr)) { const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); if (safe_str_eq(name, XML_RSC_ATTR_REMOTE_NODE)) { remote_name = value; } else if (safe_str_eq(name, "remote-addr")) { remote_server = value; } else if (safe_str_eq(name, "remote-port")) { remote_port = value; } else if (safe_str_eq(name, "remote-connect-timeout")) { connect_timeout = value; } else if (safe_str_eq(name, "remote-allow-migrate")) { remote_allow_migrate=value; } else if (safe_str_eq(name, XML_RSC_ATTR_MANAGED)) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if (pe_find_resource(data->resources, remote_name) != NULL) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node) { if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (is_set(data_set->flags, pe_flag_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } /* We need to be able to determine if a node's status section * exists or not separate from whether the node is unclean. 
*/ new_node->details->unseen = TRUE; } gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; pe_node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, TRUE)) { new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE "> entry in configuration without id"); continue; } new_node = pe_create_node(id, uname, type, score, data_set); if (new_node == NULL) { return FALSE; } /* if(data_set->have_quorum == FALSE */ /* && data_set->no_quorum_policy == no_quorum_stop) { */ /* /\* start shutting resources down *\/ */ /* new_node->weight = -INFINITY; */ /* } */ handle_startup_fencing(data_set, new_node); add_node_attrs(xml_obj, new_node, FALSE, data_set); pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, NULL, new_node->details->utilization, NULL, FALSE, data_set); crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); } } if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) { crm_info("Creating a fake local node"); pe_create_node(data_set->localhost, data_set->localhost, NULL, 0, data_set); } return TRUE; } static void setup_container(pe_resource_t * rsc, pe_working_set_t * data_set) { const char *container_id = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; setup_container(child_rsc, data_set); } return; } container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER); if (container_id && safe_str_neq(container_id, rsc->id)) { pe_resource_t *container = pe_find_resource(data_set->resources, container_id); if (container) { rsc->container = container; set_bit(container->flags, pe_rsc_is_container); container->fillers = g_list_append(container->fillers, rsc); pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id); } else { pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id); } } } gboolean unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; /* Create remote nodes and guest nodes from the resource configuration * before unpacking resources. */ for (xml_obj = __xml_first_child_element(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { const char *new_node_id = NULL; /* Check for remote nodes, which are defined by ocf:pacemaker:remote * primitives. */ if (xml_contains_remote_node(xml_obj)) { new_node_id = ID(xml_obj); /* The "pe_find_node" check is here to make sure we don't iterate over * an expanded node that has already been added to the node list. */ if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found remote node %s defined by resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } continue; } /* Check for guest nodes, which are defined by special meta-attributes * of a primitive of any type (for example, VirtualDomain or Xen). 
*/ if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, TRUE)) { /* This will add an ocf:pacemaker:remote primitive to the * configuration for the guest node's connection, to be unpacked * later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set); if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found guest node %s in resource %s", new_node_id, ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } continue; } /* Check for guest nodes inside a group. Clones are currently not * supported as guest nodes. */ if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, TRUE)) { xmlNode *xml_obj2 = NULL; for (xml_obj2 = __xml_first_child_element(xml_obj); xml_obj2 != NULL; xml_obj2 = __xml_next_element(xml_obj2)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set); if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) { crm_trace("Found guest node %s in resource %s inside group %s", new_node_id, ID(xml_obj2), ID(xml_obj)); pe_create_node(new_node_id, new_node_id, "remote", NULL, data_set); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the scheduler calculations. */ static void link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc) { pe_node_t *remote_node = NULL; if (new_rsc->is_remote_node == FALSE) { return; } if (is_set(data_set->flags, pe_flag_quick_location)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } remote_node = pe_find_node(data_set->nodes, new_rsc->id); CRM_CHECK(remote_node != NULL, return;); pe_rsc_trace(new_rsc, "Linking remote connection resource %s to node %s", new_rsc->id, remote_node->details->uname); remote_node->details->remote_rsc = new_rsc; if (new_rsc->container == NULL) { /* Handle start-up fencing for remote nodes (as opposed to guest nodes) * the same as is done for cluster nodes. */ handle_startup_fencing(data_set, remote_node); } else { /* pe_create_node() marks the new node as "remote" or "cluster"; now * that we know the node is a guest node, update it correctly. */ g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); } } static void destroy_tag(gpointer data) { pe_tag_t *tag = data; if (tag) { free(tag->id); g_list_free_full(tag->refs, free); free(tag); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] data_set Where to put resource information * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when common_unpack() calls resource_location() */ gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; GListPtr gIter = NULL; data_set->template_rsc_sets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_tag); for (xml_obj = __xml_first_child_element(xml_resources); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { pe_resource_t *new_rsc = NULL; if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, TRUE)) { const char *template_id = ID(xml_obj); if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets, template_id, NULL, NULL) == FALSE) { /* Record the template's ID for the knowledge of its existence anyway. */ g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL); } continue; } crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj)); if (common_unpack(xml_obj, &new_rsc, NULL, data_set)) { data_set->resources = g_list_append(data_set->resources, new_rsc); pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id); } else { pcmk__config_err("Ignoring <%s> resource '%s' " "because configuration is invalid", crm_element_name(xml_obj), crm_str(ID(xml_obj))); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } } } for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; setup_container(rsc, data_set); link_rsc2remotenode(data_set, rsc); } data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority); if (is_set(data_set->flags, pe_flag_quick_location)) { /* Ignore */ } else if (is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined"); pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option"); pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } gboolean unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set) { xmlNode *xml_tag = NULL; data_set->tags = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_tag); for (xml_tag = __xml_first_child_element(xml_tags); xml_tag != NULL; xml_tag = __xml_next_element(xml_tag)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = ID(xml_tag); if (crm_str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, TRUE) == FALSE) { continue; } if (tag_id == NULL) { pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID, crm_element_name(xml_tag)); continue; } for (xml_obj_ref = __xml_first_child_element(xml_tag); xml_obj_ref != NULL; xml_obj_ref = __xml_next_element(xml_obj_ref)) { const char *obj_ref = ID(xml_obj_ref); if (crm_str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, TRUE) == FALSE) { continue; } if (obj_ref == NULL) { pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID, crm_element_name(xml_obj_ref), tag_id); continue; } if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) { return FALSE; } } } return TRUE; } /* The ticket state section: * "/cib/status/tickets/ticket_state" */ static gboolean 
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set) { const char *ticket_id = NULL; const char *granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; pe_ticket_t *ticket = NULL; ticket_id = ID(xml_ticket); if (ticket_id == NULL || strlen(ticket_id) == 0) { return FALSE; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, data_set); if (ticket == NULL) { return FALSE; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_ticket, prop_name); if (crm_str_eq(prop_name, XML_ATTR_ID, TRUE)) { continue; } g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value)); } granted = g_hash_table_lookup(ticket->state, "granted"); if (granted && crm_is_true(granted)) { ticket->granted = TRUE; crm_info("We have ticket '%s'", ticket->id); } else { ticket->granted = FALSE; crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, "last-granted"); if (last_granted) { ticket->last_granted = crm_parse_int(last_granted, 0); } standby = g_hash_table_lookup(ticket->state, "standby"); if (standby && crm_is_true(standby)) { ticket->standby = TRUE; if (ticket->granted) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { ticket->standby = FALSE; } crm_trace("Done with ticket state for %s", ticket_id); return TRUE; } static gboolean unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set) { xmlNode *xml_obj = NULL; for (xml_obj = __xml_first_child_element(xml_tickets); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { if (crm_str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, TRUE) == FALSE) { continue; } unpack_ticket_state(xml_obj, data_set); } return TRUE; } static void unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_t * data_set) { const char *resource_discovery_enabled = NULL; xmlNode *attrs = NULL; pe_resource_t *rsc = NULL; if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { return; } if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) { return; } crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname); this_node->details->remote_maintenance = crm_atoi(crm_element_value(state, XML_NODE_IS_MAINTENANCE), "0"); rsc = this_node->details->remote_rsc; if (this_node->details->remote_requires_reset == FALSE) { this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; } attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, data_set); if (pe__shutdown_requested(this_node)) { crm_info("Node %s is shutting down", this_node->details->uname); this_node->details->shutdown = TRUE; if (rsc) { rsc->next_role = RSC_ROLE_STOPPED; } } if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) || (rsc && !is_set(rsc->flags, pe_rsc_managed))) { crm_info("Node %s is in maintenance-mode", this_node->details->uname); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); if 
(resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { if (pe__is_remote_node(this_node) && is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_warn("Ignoring %s attribute on remote node %s because stonith is disabled", XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); } else { /* This is either a remote node with fencing enabled, or a guest * node. We don't care whether fencing is enabled when fencing guest * nodes, because they are "fenced" by recovering their containing * resource. */ crm_info("Node %s has resource discovery disabled", this_node->details->uname); this_node->details->rsc_discovery_enabled = FALSE; } } } static bool unpack_node_loop(xmlNode * status, bool fence, pe_working_set_t * data_set) { bool changed = false; xmlNode *lrm_rsc = NULL; for (xmlNode *state = __xml_first_child_element(status); state != NULL; state = __xml_next_element(state)) { const char *id = NULL; const char *uname = NULL; pe_node_t *this_node = NULL; bool process = FALSE; if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE) == FALSE) { continue; } id = crm_element_value(state, XML_ATTR_ID); uname = crm_element_value(state, XML_ATTR_UNAME); this_node = pe_find_node_any(data_set->nodes, id, uname); if (this_node == NULL) { crm_info("Node %s is unknown", id); continue; } else if (this_node->details->unpacked) { - crm_info("Node %s is already processed", id); + crm_trace("Node %s was already processed", id); continue; } else if (!pe__is_guest_or_remote_node(this_node) && is_set(data_set->flags, pe_flag_stonith_enabled)) { // A redundant test, but preserves the order for regression tests process = TRUE; } else if (pe__is_guest_or_remote_node(this_node)) { bool check = FALSE; pe_resource_t *rsc = this_node->details->remote_rsc; if(fence) { check = TRUE; } else if(rsc == NULL) { /* Not ready yet */ } else if (pe__is_guest_node(this_node) && rsc->role == RSC_ROLE_STARTED && rsc->container->role == RSC_ROLE_STARTED) { /* Both the connection and its containing resource need to be * known to be up before we process resources running in it. */ check = TRUE; crm_trace("Checking node %s/%s/%s status %d/%d/%d", id, rsc->id, rsc->container->id, fence, rsc->role, RSC_ROLE_STARTED); } else if (!pe__is_guest_node(this_node) && ((rsc->role == RSC_ROLE_STARTED) || is_set(data_set->flags, pe_flag_shutdown_lock))) { check = TRUE; crm_trace("Checking node %s/%s status %d/%d/%d", id, rsc->id, fence, rsc->role, RSC_ROLE_STARTED); } if (check) { determine_remote_online_status(data_set, this_node); unpack_handle_remote_attrs(this_node, state, data_set); process = TRUE; } } else if (this_node->details->online) { process = TRUE; } else if (fence) { process = TRUE; } else if (is_set(data_set->flags, pe_flag_shutdown_lock)) { process = TRUE; } if(process) { crm_trace("Processing lrm resource entries on %shealthy%s node: %s", fence?"un":"", (pe__is_guest_or_remote_node(this_node)? " remote" : ""), this_node->details->uname); changed = TRUE; this_node->details->unpacked = TRUE; lrm_rsc = find_xml_node(state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); unpack_lrm_resources(this_node, lrm_rsc, data_set); } } return changed; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? 
*/ gboolean unpack_status(xmlNode * status, pe_working_set_t * data_set) { const char *id = NULL; const char *uname = NULL; xmlNode *state = NULL; pe_node_t *this_node = NULL; crm_trace("Beginning unpack"); if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket); } for (state = __xml_first_child_element(status); state != NULL; state = __xml_next_element(state)) { if (crm_str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, TRUE)) { unpack_tickets_state((xmlNode *) state, data_set); } else if (crm_str_eq((const char *)state->name, XML_CIB_TAG_STATE, TRUE)) { xmlNode *attrs = NULL; const char *resource_discovery_enabled = NULL; id = crm_element_value(state, XML_ATTR_ID); uname = crm_element_value(state, XML_ATTR_UNAME); this_node = pe_find_node_any(data_set->nodes, id, uname); if (uname == NULL) { /* error */ continue; } else if (this_node == NULL) { pcmk__config_warn("Ignoring recorded node status for '%s' " "because no longer in configuration", uname); continue; } else if (pe__is_guest_or_remote_node(this_node)) { /* online state for remote nodes is determined by the * rsc state after all the unpacking is done. we do however * need to mark whether or not the node has been fenced as this plays * a role during unpacking cluster node resource state */ this_node->details->remote_was_fenced = crm_atoi(crm_element_value(state, XML_NODE_IS_FENCED), "0"); continue; } crm_trace("Processing node id=%s, uname=%s", id, uname); /* Mark the node as provisionally clean * - at least we have seen it in the current cluster's lifetime */ this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); add_node_attrs(attrs, this_node, TRUE, data_set); if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance"))) { crm_info("Node %s is in maintenance-mode", this_node->details->uname); this_node->details->maintenance = TRUE; } resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY); if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) { crm_warn("ignoring %s attribute on node %s, disabling resource discovery is not allowed on cluster nodes", XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname); } crm_trace("determining node state"); determine_online_status(state, this_node, data_set); if (is_not_set(data_set->flags, pe_flag_have_quorum) && this_node->details->online && (data_set->no_quorum_policy == no_quorum_suicide)) { /* Everything else should flow from this automatically * (at least until the scheduler becomes able to migrate off * healthy resources) */ pe_fence_node(data_set, this_node, "cluster does not have quorum", FALSE); } } } while(unpack_node_loop(status, FALSE, data_set)) { crm_trace("Start another loop"); } // Now catch any nodes we didn't see unpack_node_loop(status, is_set(data_set->flags, pe_flag_stonith_enabled), data_set); /* Now that we know where resources are, we can schedule stops of containers * with failed bundle connections */ if (data_set->stop_needed != NULL) { for (GList *item = data_set->stop_needed; item; item = item->next) { pe_resource_t *container = item->data; pe_node_t *node = pe__current_node(container); if (node) { stop_action(container, node, FALSE); } } g_list_free(data_set->stop_needed); 
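The repeated unpack_node_loop() passes above exist because remote and guest node history can only be unpacked after the state of the corresponding connection (and container) resource has been learned from some other node's history, so unpack_status() keeps looping while a pass reports progress, then makes one final catch-all pass. A minimal standalone sketch of that iterate-until-no-progress pattern (hypothetical names and types, not the Pacemaker API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: each entry may depend on another entry having been
 * processed first (like a guest node depending on its connection resource).
 */
struct entry {
    const char *name;
    int depends_on;   /* index of prerequisite entry, or -1 for none */
    bool processed;
};

/* One pass over all entries; returns true if anything new was processed,
 * mirroring how unpack_node_loop() reports whether another pass is needed.
 */
static bool
process_pass(struct entry *entries, int n_entries, bool force)
{
    bool changed = false;

    for (int i = 0; i < n_entries; i++) {
        struct entry *e = &entries[i];

        if (e->processed) {
            continue;
        }
        if (!force && (e->depends_on >= 0)
            && !entries[e->depends_on].processed) {
            continue;   /* prerequisite not ready yet, try again next pass */
        }
        printf("processing %s\n", e->name);
        e->processed = true;
        changed = true;
    }
    return changed;
}

int
main(void)
{
    struct entry entries[] = {
        { "guest1", 2, false },    /* needs remote1 handled first */
        { "cluster1", -1, false },
        { "remote1", 1, false },   /* needs cluster1 handled first */
    };

    while (process_pass(entries, 3, false)) {
        /* keep looping while progress is being made */
    }
    process_pass(entries, 3, true);  /* final catch-all pass */
    return 0;
}
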
data_set->stop_needed = NULL; } for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *this_node = gIter->data; if (this_node == NULL) { continue; } else if (!pe__is_guest_or_remote_node(this_node)) { continue; } else if(this_node->details->unpacked) { continue; } determine_remote_online_status(data_set, this_node); } return TRUE; } static gboolean determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state, pe_node_t * this_node) { gboolean online = FALSE; const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE); const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER); const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER); const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); if (!crm_is_true(in_cluster)) { crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster)); } else if (safe_str_eq(is_peer, ONLINESTATUS)) { if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) { online = TRUE; } else { crm_debug("Node is not ready to run resources: %s", join); } } else if (this_node->details->expected_up == FALSE) { crm_trace("Controller is down: in_cluster=%s", crm_str(in_cluster)); crm_trace("\tis_peer=%s, join=%s, expected=%s", crm_str(is_peer), crm_str(join), crm_str(exp_state)); } else { /* mark it unclean */ pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE); crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s", crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state)); } return online; } static gboolean determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state, pe_node_t * this_node) { gboolean online = FALSE; gboolean do_terminate = FALSE; bool crmd_online = FALSE; const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE); const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER); const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER); const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); const char *terminate = pe_node_attribute_raw(this_node, "terminate"); /* - XML_NODE_IN_CLUSTER ::= true|false - XML_NODE_IS_PEER ::= online|offline - XML_NODE_JOIN_STATE ::= member|down|pending|banned - XML_NODE_EXPECTED ::= member|down */ if (crm_is_true(terminate)) { do_terminate = TRUE; } else if (terminate != NULL && strlen(terminate) > 0) { /* could be a time() value */ char t = terminate[0]; if (t != '0' && isdigit(t)) { do_terminate = TRUE; } } crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d", this_node->details->uname, crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state), do_terminate); online = crm_is_true(in_cluster); crmd_online = safe_str_eq(is_peer, ONLINESTATUS); if (exp_state == NULL) { exp_state = CRMD_JOINSTATE_DOWN; } if (this_node->details->shutdown) { crm_debug("%s is shutting down", this_node->details->uname); /* Slightly different criteria since we can't shut down a dead peer */ online = crmd_online; } else if (in_cluster == NULL) { pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE); } else if (safe_str_eq(join, CRMD_JOINSTATE_NACK)) { pe_fence_node(data_set, this_node, "peer failed the pacemaker membership criteria", FALSE); } else if (do_terminate == FALSE && safe_str_eq(exp_state, CRMD_JOINSTATE_DOWN)) { if (crm_is_true(in_cluster) || crmd_online) { crm_info("- Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby 
= TRUE; this_node->details->pending = TRUE; } else { crm_trace("%s is down or still coming up", this_node->details->uname); } } else if (do_terminate && safe_str_eq(join, CRMD_JOINSTATE_DOWN) && crm_is_true(in_cluster) == FALSE && !crmd_online) { crm_info("Node %s was just shot", this_node->details->uname); online = FALSE; } else if (crm_is_true(in_cluster) == FALSE) { // Consider `priority-fencing-delay` for lost nodes pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE); } else if (!crmd_online) { pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE); /* Everything is running at this point, now check join state */ } else if (do_terminate) { pe_fence_node(data_set, this_node, "termination was requested", FALSE); } else if (safe_str_eq(join, CRMD_JOINSTATE_MEMBER)) { crm_info("Node %s is active", this_node->details->uname); } else if (safe_str_eq(join, CRMD_JOINSTATE_PENDING) || safe_str_eq(join, CRMD_JOINSTATE_DOWN)) { crm_info("Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE); crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d", this_node->details->uname, crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown); } return online; } -static gboolean +static void determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node) { pe_resource_t *rsc = this_node->details->remote_rsc; pe_resource_t *container = NULL; pe_node_t *host = NULL; /* If there is a node state entry for a (former) Pacemaker Remote node * but no resource creating that node, the node's connection resource will * be NULL. Consider it an offline remote node in that case. */ if (rsc == NULL) { this_node->details->online = FALSE; goto remote_online_done; } container = rsc->container; if (container && pcmk__list_of_1(rsc->running_on)) { host = rsc->running_on->data; } /* If the resource is currently started, mark it online. */ if (rsc->role == RSC_ROLE_STARTED) { crm_trace("%s node %s presumed ONLINE because connection resource is started", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) { crm_trace("%s node %s shutting down because connection resource is stopping", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. */ if(container && is_set(container->flags, pe_rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; } else if (rsc->role == RSC_ROLE_STOPPED || (container && container->role == RSC_ROLE_STOPPED)) { crm_trace("%s node %s OFFLINE because its resource is stopped", (container? 
"Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = FALSE; } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } remote_online_done: crm_trace("Remote node %s online=%s", this_node->details->id, this_node->details->online ? "TRUE" : "FALSE"); - return this_node->details->online; } -gboolean +static void determine_online_status(xmlNode * node_state, pe_node_t * this_node, pe_working_set_t * data_set) { gboolean online = FALSE; const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED); - CRM_CHECK(this_node != NULL, return FALSE); + CRM_CHECK(this_node != NULL, return); this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; if (pe__shutdown_requested(this_node)) { this_node->details->shutdown = TRUE; } else if (safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) { this_node->details->expected_up = TRUE; } if (this_node->details->type == node_ping) { this_node->details->unclean = FALSE; online = FALSE; /* As far as resource management is concerned, * the node is safely offline. * Anyone caught abusing this logic will be shot */ } else if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { online = determine_online_status_no_fencing(data_set, node_state, this_node); } else { online = determine_online_status_fencing(data_set, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if (this_node->details->type == node_ping) { crm_info("Node %s is not a pacemaker node", this_node->details->uname); } else if (this_node->details->unclean) { pe_proc_warn("Node %s is unclean", this_node->details->uname); } else if (this_node->details->online) { crm_info("Node %s is %s", this_node->details->uname, this_node->details->shutdown ? "shutting down" : this_node->details->pending ? "pending" : this_node->details->standby ? "standby" : this_node->details->maintenance ? "maintenance" : "online"); } else { crm_trace("Node %s is offline", this_node->details->uname); } - - return online; } /*! * \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!pcmk__str_empty(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! * \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. 
*/ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; CRM_ASSERT(end); basename = strndup(last_rsc_id, end - last_rsc_id + 1); CRM_ASSERT(basename); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; CRM_ASSERT(end); zero = calloc(base_name_len + 3, sizeof(char)); CRM_ASSERT(zero); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static pe_resource_t * create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set) { pe_resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { pe_node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pe_find_node(data_set->nodes, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set); } link_rsc2remotenode(data_set, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) { /* This orphaned rsc needs to be mapped to a container. */ crm_trace("Detected orphaned container filler %s", rsc_id); set_bit(rsc->flags, pe_rsc_orphan_container_filler); } set_bit(rsc->flags, pe_rsc_orphan); data_set->resources = g_list_append(data_set->resources, rsc); return rsc; } /*! * \internal * \brief Create orphan instance for anonymous clone resource history */ static pe_resource_t * create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id, pe_node_t *node, pe_working_set_t *data_set) { pe_resource_t *top = pe__create_clone_child(parent, data_set); // find_rsc() because we might be a cloned group pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone); pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, node->details->uname); return orphan; } /*! * \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of clone-max instances); * (3) a newly created orphan (i.e. clone-max instances are already active). 
* * \param[in] data_set Cluster information * \param[in] node Node on which to check for instance * \param[in] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (without instance) */ static pe_resource_t * find_anonymous_clone(pe_working_set_t * data_set, pe_node_t * node, pe_resource_t * parent, const char *rsc_id) { GListPtr rIter = NULL; pe_resource_t *rsc = NULL; pe_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; CRM_ASSERT(parent != NULL); CRM_ASSERT(pe_rsc_is_clone(parent)); CRM_ASSERT(is_not_set(parent->flags, pe_rsc_unique)); // Check for active (or partially active, for cloned groups) instance pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id); for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) { GListPtr locations = NULL; pe_resource_t *child = rIter->data; /* Check whether this instance is already known to be active or pending * anywhere, at this stage of unpacking. Because this function is called * for a resource before the resource's individual operation history * entries are unpacked, locations will generally not contain the * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if globally-unique * was flipped from true to false); and * (3) when we re-run calculations on the same data set as part of a * simulation. */ child->fns->location(child, &locations, 2); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (((pe_node_t *)locations->data)->details == node->details) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. */ rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if globally-unique is switched from true to * false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. */ if (rsc->running_on) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, node->details->uname); skip_inactive = TRUE; rsc = NULL; } else { pe_rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pe_rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && is_not_set(child->flags, pe_rsc_block)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone); /* ... 
but don't use it if it was already associated with a * pending action on another node */ if (inactive_instance && inactive_instance->pending_node && (inactive_instance->pending_node->details != node->details)) { inactive_instance = NULL; } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has "requires" set to "quorum" or "nothing", and we don't * have a clone instance for every node, we don't want to consume a valid * instance number for unclean nodes. Such instances may appear to be active * according to the history, but should be considered inactive, so we can * start an instance elsewhere. Treat such instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. * * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && is_not_set(rsc->flags, pe_rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !pe__is_guest_node(node) && !pe__is_universal_clone(parent, data_set)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, data_set); pe_rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static pe_resource_t * unpack_find_resource(pe_working_set_t * data_set, pe_node_t * node, const char *rsc_id, xmlNode * rsc_entry) { pe_resource_t *rsc = NULL; pe_resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when clone-max=0, we create * a single :0 orphan to match against here. */ char *clone0_id = clone_zero(rsc_id); pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id); if (clone0 && is_not_set(clone0->flags, pe_rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->variant > pe_native) { crm_trace("Resource history for %s is orphaned because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pe_rsc_is_anon_clone(parent)) { if (pe_rsc_is_bundled(parent)) { rsc = pe__find_bundle_replica(parent->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(data_set, node, parent, base); free(base); CRM_ASSERT(rsc != NULL); } } if (rsc && safe_str_neq(rsc_id, rsc->id) && safe_str_neq(rsc_id, rsc->clone_name)) { free(rsc->clone_name); rsc->clone_name = strdup(rsc_id); pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, node->details->uname, rsc->id, (is_set(rsc->flags, pe_rsc_orphan)? 
" (ORPHAN)" : "")); } return rsc; } static pe_resource_t * process_orphan_resource(xmlNode * rsc_entry, pe_node_t * node, pe_working_set_t * data_set) { pe_resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname); rsc = create_fake_resource(rsc_id, rsc_entry, data_set); if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } else { CRM_CHECK(rsc != NULL, return NULL); pe_rsc_trace(rsc, "Added orphan %s", rsc->id); resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set); } return rsc; } static void process_rsc_state(pe_resource_t * rsc, pe_node_t * node, enum action_fail_response on_fail, xmlNode * migrate_op, pe_working_set_t * data_set) { pe_node_t *tmpnode = NULL; char *reason = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail)); /* process current state */ if (rsc->role != RSC_ROLE_UNKNOWN) { pe_resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) { pe_node_t *n = node_copy(node); pe_rsc_trace(rsc, "%s (aka. %s) known on %s", rsc->id, rsc->clone_name, n->details->uname); g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n); } if (is_set(iter->flags, pe_rsc_unique)) { break; } iter = iter->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if (rsc->role > RSC_ROLE_STOPPED && node->details->online == FALSE && node->details->maintenance == FALSE && is_set(rsc->flags, pe_rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. */ if (pe__is_guest_node(node)) { set_bit(rsc->flags, pe_rsc_failed); should_fence = TRUE; } else if (is_set(data_set->flags, pe_flag_stonith_enabled)) { if (pe__is_remote_node(node) && node->details->remote_rsc && is_not_set(node->details->remote_rsc->flags, pe_rsc_failed)) { /* Setting unseen means that fencing of the remote node will * occur only if the connection resource is not going to start * somewhere. This allows connection resources on a failed * cluster node to move to another node without requiring the * remote nodes to be fenced as well. 
*/ node->details->unseen = TRUE; reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(data_set, node, reason, FALSE); } free(reason); } if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = action_fail_ignore; } switch (on_fail) { case action_fail_ignore: /* nothing to do */ break; case action_fail_fence: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(data_set, node, reason, FALSE); free(reason); break; case action_fail_standby: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case action_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); break; case action_fail_migrate: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set); break; case action_fail_stop: rsc->next_role = RSC_ROLE_STOPPED; break; case action_fail_recover: if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { set_bit(rsc->flags, pe_rsc_failed); stop_action(rsc, node, FALSE); } break; case action_fail_restart_container: set_bit(rsc->flags, pe_rsc_failed); if (rsc->container && pe_rsc_is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than * the bundle's container. We don't necessarily know where the * container is running yet, so remember it and add a stop * action for it later. */ data_set->stop_needed = g_list_prepend(data_set->stop_needed, rsc->container); } else if (rsc->container) { stop_action(rsc->container, node, FALSE); } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { stop_action(rsc, node, FALSE); } break; case action_fail_reset_remote: set_bit(rsc->flags, pe_rsc_failed); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { tmpnode = NULL; if (rsc->is_remote_node) { tmpnode = pe_find_node(data_set->nodes, rsc->id); } if (tmpnode && pe__is_remote_node(tmpnode) && tmpnode->details->remote_was_fenced == 0) { /* The remote connection resource failed in a way that * should result in fencing the remote node. */ pe_fence_node(data_set, tmpnode, "remote connection is unrecoverable", FALSE); } } /* require the stop action regardless if fencing is occurring or not. */ if (rsc->role > RSC_ROLE_STOPPED) { stop_action(rsc, node, FALSE); } /* if reconnect delay is in use, prevent the connection from exiting the * "STOPPED" role until the failure is cleared by the delay timeout. */ if (rsc->remote_reconnect_ms) { rsc->next_role = RSC_ROLE_STOPPED; } break; } /* ensure a remote-node connection failure forces an unclean remote-node * to be fenced. By setting unseen = FALSE, the remote-node failure will * result in a fencing operation regardless if we're going to attempt to * reconnect to the remote-node in this transition or not. 
*/ if (is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) { tmpnode = pe_find_node(data_set->nodes, rsc->id); if (tmpnode && tmpnode->details->unclean) { tmpnode->details->unseen = FALSE; } } if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { if (is_set(rsc->flags, pe_rsc_orphan)) { if (is_set(rsc->flags, pe_rsc_managed)) { pcmk__config_warn("Detected active orphan %s running on %s", rsc->id, node->details->uname); } else { pcmk__config_warn("Resource '%s' must be stopped manually on " "%s because cluster is configured not to " "stop active orphans", rsc->id, node->details->uname); } } native_add_running(rsc, node, data_set); if (on_fail != action_fail_ignore) { set_bit(rsc->flags, pe_rsc_failed); } } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { /* Only do this for older status sections that included instance numbers * Otherwise stopped instances will appear as orphans */ pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); free(rsc->clone_name); rsc->clone_name = NULL; } else { GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP, FALSE); GListPtr gIter = possible_matches; for (; gIter != NULL; gIter = gIter->next) { pe_action_t *stop = (pe_action_t *) gIter->data; stop->flags |= pe_action_optional; } g_list_free(possible_matches); } } /* create active recurring operations as optional */ static void process_recurring(pe_node_t * node, pe_resource_t * rsc, int start_index, int stop_index, GListPtr sorted_op_list, pe_working_set_t * data_set) { int counter = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = ID(rsc_op); counter++; if (node->details->online == FALSE) { pe_rsc_trace(rsc, "Skipping %s/%s: node is offline", rsc->id, node->details->uname); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pe_rsc_trace(rsc, "Skipping %s/%s: resource is not active", id, node->details->uname); continue; } else if (counter < start_index) { pe_rsc_trace(rsc, "Skipping %s/%s: old %d", id, node->details->uname, counter); continue; } crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if (interval_ms == 0) { pe_rsc_trace(rsc, "Skipping %s/%s: non-recurring", id, node->details->uname); continue; } status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if (safe_str_eq(status, "-1")) { pe_rsc_trace(rsc, "Skipping %s/%s: status", id, node->details->uname); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = pcmk__op_key(rsc->id, task, interval_ms); pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname); custom_action(rsc, key, task, node, TRUE, TRUE, data_set); } } void calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; GListPtr gIter = sorted_op_list; *stop_index = -1; *start_index = -1; for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if 
(safe_str_eq(task, CRMD_ACTION_STOP) && safe_str_eq(status, "0")) { *stop_index = counter; } else if (safe_str_eq(task, CRMD_ACTION_START) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && safe_str_eq(task, CRMD_ACTION_STATUS)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if (safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) { implied_monitor_start = counter; } } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE) || safe_str_eq(task, CRMD_ACTION_DEMOTE)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } // If resource history entry has shutdown lock, remember lock node and time static void unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { time_t lock_time = 0; // When lock started (i.e. node shutdown time) if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK, &lock_time) == pcmk_ok) && (lock_time != 0)) { if ((data_set->shutdown_lock > 0) && (get_effective_time(data_set) > (lock_time + data_set->shutdown_lock))) { pe_rsc_info(rsc, "Shutdown lock for %s on %s expired", rsc->id, node->details->uname); pe__clear_resource_history(rsc, node, data_set); } else { rsc->lock_node = node; rsc->lock_time = lock_time; } } } static pe_resource_t * unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * data_set) { GListPtr gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; const char *task = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); pe_resource_t *rsc = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; xmlNode *migrate_op = NULL; xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; enum action_fail_response on_fail = FALSE; enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; crm_trace("[%s] Processing %s on %s", crm_element_name(rsc_entry), rsc_id, node->details->uname); /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_prepend(op_list, rsc_op); } } if (is_not_set(data_set->flags, pe_flag_shutdown_lock)) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } } /* find the resource */ rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); if (rsc == NULL) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } else { rsc = process_orphan_resource(rsc_entry, node, data_set); } } CRM_ASSERT(rsc != NULL); // Check whether the resource is "shutdown-locked" to this node if (is_set(data_set->flags, pe_flag_shutdown_lock)) { unpack_shutdown_lock(rsc_entry, rsc, node, data_set); } /* process operations */ saved_role = rsc->role; on_fail = action_fail_ignore; rsc->role = RSC_ROLE_UNKNOWN; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { migrate_op = rsc_op; } unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set); } /* create active recurring operations as optional 
*/ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail, migrate_op, data_set); if (get_target_role(rsc, &req_role)) { if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { pe_rsc_debug(rsc, "%s: Overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); rsc->next_role = req_role; } else if (req_role > rsc->next_role) { pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if (saved_role > rsc->role) { rsc->role = saved_role; } return rsc; } static void handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; for (rsc_entry = __xml_first_child_element(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { pe_resource_t *rsc; pe_resource_t *container; const char *rsc_id; const char *container_id; if (safe_str_neq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE)) { continue; } container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER); rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); if (container_id == NULL || rsc_id == NULL) { continue; } container = pe_find_resource(data_set->resources, container_id); if (container == NULL) { continue; } rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL || is_set(rsc->flags, pe_rsc_orphan_container_filler) == FALSE || rsc->container != NULL) { continue; } pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s", rsc->id, container_id); rsc->container = container; container->fillers = g_list_append(container->fillers, rsc); } } gboolean unpack_lrm_resources(pe_node_t * node, xmlNode * lrm_rsc_list, pe_working_set_t * data_set) { xmlNode *rsc_entry = NULL; gboolean found_orphaned_container_filler = FALSE; CRM_CHECK(node != NULL, return FALSE); crm_trace("Unpacking resources on %s", node->details->uname); for (rsc_entry = __xml_first_child_element(lrm_rsc_list); rsc_entry != NULL; rsc_entry = __xml_next_element(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { pe_resource_t *rsc = unpack_lrm_rsc_state(node, rsc_entry, data_set); if (!rsc) { continue; } if (is_set(rsc->flags, pe_rsc_orphan_container_filler)) { found_orphaned_container_filler = TRUE; } } } /* now that all the resource state has been unpacked for this node * we have to go back and map any orphaned container fillers to their * container resource */ if (found_orphaned_container_filler) { handle_orphaned_container_fillers(lrm_rsc_list, data_set); } return TRUE; } static void set_active(pe_resource_t * rsc) { pe_resource_t *top = uber_parent(rsc); if (top && is_set(top->flags, pe_rsc_promotable)) { rsc->role = RSC_ROLE_SLAVE; } else { rsc->role = RSC_ROLE_STARTED; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { pe_node_t *node = value; int *score = user_data; node->weight = *score; } #define STATUS_PATH_MAX 1024 static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, bool success_only, pe_working_set_t *data_set) { int offset = 0; char xpath[STATUS_PATH_MAX]; xmlNode *xml = NULL; offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node); 
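    /* Continue building the XPath query; the complete expression has the form
     *   //node_state[@uname='NODE']//lrm_resource[@id='RSC']/lrm_rsc_op[...]
     * where the lrm_rsc_op predicate added below selects the operation by
     * name (and by migrate_source or migrate_target when looking up one leg
     * of a migration), so only the relevant history entry is returned.
     */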
offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']", resource); /* Need to check against transition_magic too? */ if (source && safe_str_eq(op, CRMD_ACTION_MIGRATE)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op, source); } else if (source && safe_str_eq(op, CRMD_ACTION_MIGRATED)) { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op, source); } else { offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op); } CRM_LOG_ASSERT(offset > 0); xml = get_xpath_object(xpath, data_set->input, LOG_DEBUG); if (xml && success_only) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_LRM_OP_ERROR; crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status); if ((rc != PCMK_OCF_OK) || (status != PCMK_LRM_OP_DONE)) { return NULL; } } return xml; } static int pe__call_id(xmlNode *op_xml) { int id = 0; if (op_xml) { crm_element_value_int(op_xml, XML_LRM_ATTR_CALLID, &id); } return id; } /*! * \brief Check whether a stop happened on the same node after some event * * \param[in] rsc Resource being checked * \param[in] node Node being checked * \param[in] xml_op Event that stop is being compared to * \param[in] data_set Cluster working set * * \return TRUE if stop happened after event, FALSE otherwise * * \note This is really unnecessary, but kept as a safety mechanism. We * currently don't save more than one successful event in history, so this * only matters when processing really old CIB files that we don't * technically support anymore, or as preparation for logging an extended * history in the future. */ static bool stop_happened_after(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP, node->details->uname, NULL, TRUE, data_set); return (stop_op && (pe__call_id(stop_op) > pe__call_id(xml_op))); } static void unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { /* A successful migration sequence is: * migrate_to on source node * migrate_from on target node * stop on source node * * If a migrate_to is followed by a stop, the entire migration (successful * or failed) is complete, and we don't care what happened on the target. * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from failed, make sure the resource gets * stopped on both source and target (if up). * * If the migrate_to and migrate_from both succeeded (which also implies the * resource is no longer running on the source), but there is no stop, the * migration is considered to be "dangling". Schedule a stop on the source * in this case. 
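     *
     * For example, a complete and successful migration of a resource from
     * node1 to node2 leaves this history: a migrate_to on node1, a
     * migrate_from on node2, and a stop on node1 with a higher call-id than
     * the migrate_to (which is what stop_happened_after() checks for).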
*/ int from_rc = 0; int from_status = 0; pe_node_t *target_node = NULL; pe_node_t *source_node = NULL; xmlNode *migrate_from = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(source, node->details->uname), return); if (stop_happened_after(rsc, node, xml_op, data_set)) { return; } // Clones are not allowed to migrate, so role can't be master rsc->role = RSC_ROLE_STARTED; target_node = pe_find_node(data_set->nodes, target); source_node = pe_find_node(data_set->nodes, source); // Check whether there was a migrate_from action on the target migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target, source, FALSE, data_set); if (migrate_from) { crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc); crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status); pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d", ID(migrate_from), target, from_status, from_rc); } if (migrate_from && from_rc == PCMK_OCF_OK && from_status == PCMK_LRM_OP_DONE) { /* The migrate_to and migrate_from both succeeded, so mark the migration * as "dangling". This will be used to schedule a stop action on the * source without affecting the target. */ pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op), source); rsc->role = RSC_ROLE_STOPPED; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } else if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) { // Failed if (target_node && target_node->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node, target_node->details->online); native_add_running(rsc, target_node, data_set); } } else { // Pending, or complete but erased if (target_node && target_node->details->online) { pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node, target_node->details->online); native_add_running(rsc, target_node, data_set); if (source_node && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ rsc->partial_migration_target = target_node; rsc->partial_migration_source = source_node; } } else { /* Consider it failed here - forces a restart, prevents migration */ set_bit(rsc->flags, pe_rsc_failed); clear_bit(rsc->flags, pe_rsc_allow_migrate); } } } static void unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { int target_stop_id = 0; int target_migrate_from_id = 0; xmlNode *target_stop = NULL; xmlNode *target_migrate_from = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(source, node->details->uname), return); /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be master. 
*/ rsc->role = RSC_ROLE_STARTED; // Check for stop on the target target_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, target, NULL, TRUE, data_set); target_stop_id = pe__call_id(target_stop); // Check for migrate_from on the target target_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target, source, TRUE, data_set); target_migrate_from_id = pe__call_id(target_migrate_from); if ((target_stop == NULL) || (target_stop_id < target_migrate_from_id)) { /* There was no stop on the source, or a stop that happened before a * migrate_from, so assume the resource is still active on the target * (if it is up). */ pe_node_t *target_node = pe_find_node(data_set->nodes, target); pe_rsc_trace(rsc, "stop (%d) + migrate_from (%d)", target_stop_id, target_migrate_from_id); if (target_node && target_node->details->online) { native_add_running(rsc, target_node, data_set); } } else if (target_migrate_from == NULL) { /* We know there was a stop on the target, but there may not have been a * migrate_from (the stop could have happened before migrate_from was * scheduled or attempted). * * That means this could be a "dangling" migration. But first, check * whether there is a newer migrate_from or start on the source node -- * it's possible the failed migration was followed by a successful * full restart or migration in the reverse direction, in which case we * don't want to force it to stop. */ xmlNode *source_migrate_from = NULL; xmlNode *source_start = NULL; int source_migrate_to_id = pe__call_id(xml_op); source_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, source, NULL, TRUE, data_set); if (pe__call_id(source_migrate_from) > source_migrate_to_id) { return; } source_start = find_lrm_op(rsc->id, CRMD_ACTION_START, source, NULL, TRUE, data_set); if (pe__call_id(source_start) > source_migrate_to_id) { return; } // Mark node as having dangling migration so we can force a stop later rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node); } } static void unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, pe_working_set_t *data_set) { xmlNode *source_stop = NULL; xmlNode *source_migrate_to = NULL; const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE); const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); // Sanity check CRM_CHECK(source && target && !strcmp(target, node->details->uname), return); /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be master. */ rsc->role = RSC_ROLE_STARTED; // Check for a stop on the source source_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, source, NULL, TRUE, data_set); // Check for a migrate_to on the source source_migrate_to = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE, source, target, TRUE, data_set); if ((source_stop == NULL) || (pe__call_id(source_stop) < pe__call_id(source_migrate_to))) { /* There was no stop on the source, or a stop that happened before * migrate_to, so assume the resource is still active on the source (if * it is up). 
*/ pe_node_t *source_node = pe_find_node(data_set->nodes, source); if (source_node && source_node->details->online) { native_add_running(rsc, source_node, data_set); } } } static void record_failed_op(xmlNode *op, const pe_node_t *node, const pe_resource_t *rsc, pe_working_set_t *data_set) { xmlNode *xIter = NULL; const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY); if (node->details->online == FALSE) { return; } for (xIter = data_set->failed->children; xIter; xIter = xIter->next) { const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY); const char *uname = crm_element_value(xIter, XML_ATTR_UNAME); if(safe_str_eq(op_key, key) && safe_str_eq(uname, node->details->uname)) { crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname); return; } } crm_trace("Adding entry %s on %s", op_key, node->details->uname); crm_xml_add(op, XML_ATTR_UNAME, node->details->uname); crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id); add_node_copy(data_set->failed, op); } static const char *get_op_key(xmlNode *xml_op) { const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); if(key == NULL) { key = ID(xml_op); } return key; } static const char * last_change_str(xmlNode *xml_op) { time_t when; const char *when_s = NULL; if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &when) == pcmk_ok) { when_s = pcmk__epoch2str(&when); if (when_s) { // Skip day of week to make message shorter when_s = strchr(when_s, ' '); if (when_s) { ++when_s; } } } return ((when_s && *when_s)? when_s : "unknown time"); } static void unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = false; pe_action_t *action = NULL; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); CRM_ASSERT(rsc); CRM_CHECK(task != NULL, return); *last_failure = xml_op; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) { is_probe = true; } if (exit_reason == NULL) { exit_reason = ""; } if (is_not_set(data_set->flags, pe_flag_symmetric_cluster) && (rc == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " rc=%d id=%s", services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, (is_probe? "probe" : task), rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); } else { crm_warn("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " rc=%d id=%s", services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, (is_probe? "probe" : task), rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); if (is_probe && (rc != PCMK_OCF_OK) && (rc != PCMK_OCF_NOT_RUNNING) && (rc != PCMK_OCF_RUNNING_MASTER)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. 
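             * (A common example is a probe returning PCMK_OCF_NOT_INSTALLED
             * on a node where the software required by the resource is not
             * installed.)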
*/ crm_notice("If it is not possible for %s to run on %s, see " "the resource-discovery option for location constraints", rsc->id, node->details->uname); } record_failed_op(xml_op, node, rsc, data_set); } action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) || (action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) || (action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) || (*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) { pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), fail2text(action->on_fail), action->uuid, key); *on_fail = action->on_fail; } if (!strcmp(task, CRMD_ACTION_STOP)) { resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set); } else if (!strcmp(task, CRMD_ACTION_MIGRATE)) { unpack_migrate_to_failure(rsc, node, xml_op, data_set); } else if (!strcmp(task, CRMD_ACTION_MIGRATED)) { unpack_migrate_from_failure(rsc, node, xml_op, data_set); } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if (!strcmp(task, CRMD_ACTION_DEMOTE)) { if (action->on_fail == action_fail_block) { rsc->role = RSC_ROLE_MASTER; rsc->next_role = RSC_ROLE_STOPPED; } else if(rc == PCMK_OCF_NOT_RUNNING) { rsc->role = RSC_ROLE_STOPPED; } else { /* Staying in master role would put the scheduler and controller * into a loop. Setting slave role is not dangerous because the * resource will be stopped as part of recovery, and any master * promotion will be ordered after that stop. */ rsc->role = RSC_ROLE_SLAVE; } } if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) { /* leave stopped */ pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id); rsc->role = RSC_ROLE_STOPPED; } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "Setting %s active", rsc->id); set_active(rsc); } pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s", rsc->id, role2text(rsc->role), node->details->unclean ? "true" : "false", fail2text(action->on_fail), role2text(action->fail_role)); if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { rsc->next_role = action->fail_role; } if (action->fail_role == RSC_ROLE_STOPPED) { int score = -INFINITY; pe_resource_t *fail_rsc = rsc; if (fail_rsc->parent) { pe_resource_t *parent = uber_parent(fail_rsc); if (pe_rsc_is_clone(parent) && is_not_set(parent->flags, pe_rsc_unique)) { /* For clone resources, if a child fails on an operation * with on-fail = stop, all the resources fail. Do this by preventing * the parent from coming up again. */ fail_rsc = parent; } } crm_notice("%s will not be started under current conditions", fail_rsc->id); /* make sure it doesn't come up again */ if (fail_rsc->allowed_nodes != NULL) { g_hash_table_destroy(fail_rsc->allowed_nodes); } fail_rsc->allowed_nodes = node_hash_from_list(data_set->nodes); g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score); } pe_free_action(action); } /*! * \internal * \brief Remap operation status based on action result * * Given an action result, determine an appropriate operation status for the * purposes of responding to the action (the status provided by the executor is * not directly usable since the executor does not know what was expected). 
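 *
 * For example, a probe (non-recurring monitor) that finds the resource
 * running reports PCMK_OCF_OK even though the expected probe result is
 * PCMK_OCF_NOT_RUNNING; the remapping below treats that as a successful
 * (PCMK_LRM_OP_DONE) result rather than a failure.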
* * \param[in,out] rsc Resource that operation history entry is for * \param[in] rc Actual return code of operation * \param[in] target_rc Expected return code of operation * \param[in] node Node where operation was executed * \param[in] xml_op Operation history entry XML from CIB status * \param[in,out] on_fail What should be done about the result * \param[in] data_set Current cluster working set * * \return Operation status based on return code and action info * \note This may update the resource's current and next role. */ static int determine_op_status( pe_resource_t *rsc, int rc, int target_rc, pe_node_t * node, xmlNode * xml_op, enum action_fail_response * on_fail, pe_working_set_t * data_set) { guint interval_ms = 0; bool is_probe = false; int result = PCMK_LRM_OP_DONE; const char *key = get_op_key(xml_op); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); CRM_ASSERT(rsc); CRM_CHECK(task != NULL, return PCMK_LRM_OP_ERROR); if (exit_reason == NULL) { exit_reason = ""; } crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) { is_probe = true; task = "probe"; } if (target_rc < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * target_rc in the transition key, which (along with the similar case * of a corrupted transition key in the CIB) will be reported to this * function as -1. Pacemaker 2.0+ does not support rolling upgrades from * those versions or processing of saved CIB files from those versions, * so we do not need to care much about this case. */ result = PCMK_LRM_OP_ERROR; crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)", key, node->details->uname); } else if (target_rc != rc) { result = PCMK_LRM_OP_ERROR; pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)", key, node->details->uname, target_rc, services_ocf_exitcode_str(target_rc), rc, services_ocf_exitcode_str(rc), (*exit_reason? 
": " : ""), exit_reason); } switch (rc) { case PCMK_OCF_OK: if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) { result = PCMK_LRM_OP_DONE; pe_rsc_info(rsc, "Probe found %s active on %s at %s", rsc->id, node->details->uname, last_change_str(xml_op)); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || target_rc == rc || is_not_set(rsc->flags, pe_rsc_managed)) { result = PCMK_LRM_OP_DONE; rsc->role = RSC_ROLE_STOPPED; /* clear any previous failure actions */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } break; case PCMK_OCF_RUNNING_MASTER: if (is_probe && (rc != target_rc)) { result = PCMK_LRM_OP_DONE; pe_rsc_info(rsc, "Probe found %s active and promoted on %s at %s", rsc->id, node->details->uname, last_change_str(xml_op)); } rsc->role = RSC_ROLE_MASTER; break; case PCMK_OCF_DEGRADED_MASTER: case PCMK_OCF_FAILED_MASTER: rsc->role = RSC_ROLE_MASTER; result = PCMK_LRM_OP_ERROR; break; case PCMK_OCF_NOT_CONFIGURED: result = PCMK_LRM_OP_ERROR_FATAL; break; case PCMK_OCF_UNIMPLEMENT_FEATURE: if (interval_ms > 0) { result = PCMK_LRM_OP_NOTSUPPORTED; break; } // fall through case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: if (!pe_can_fence(data_set, node) && !strcmp(task, CRMD_ACTION_STOP)) { /* If a stop fails and we can't fence, there's nothing else we can do */ pe_proc_err("No further recovery can be attempted for %s " "because %s on %s failed (%s%s%s) at %s " CRM_XS " rc=%d id=%s", rsc->id, task, node->details->uname, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, last_change_str(xml_op), rc, ID(xml_op)); clear_bit(rsc->flags, pe_rsc_managed); set_bit(rsc->flags, pe_rsc_block); } result = PCMK_LRM_OP_ERROR_HARD; break; default: if (result == PCMK_LRM_OP_DONE) { crm_info("Treating unknown exit status %d from %s of %s " "on %s at %s as failure", rc, task, rsc->id, node->details->uname, last_change_str(xml_op)); result = PCMK_LRM_OP_ERROR; } break; } return result; } // return TRUE if start or monitor last failure but parameters changed static bool should_clear_for_param_change(xmlNode *xml_op, const char *task, pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { if (!strcmp(task, "start") || !strcmp(task, "monitor")) { if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources yet, so we can't reliably * substitute addr parameters for the REMOTE_CONTAINER_HACK. * When that's needed, defer the check until later. 
*/ pe__add_param_check(xml_op, rsc, node, pe_check_last_failure, data_set); } else { op_digest_cache_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, get_op_key(xml_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn, pe_working_set_t *data_set) { pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id); if (remote_node) { pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, FALSE, data_set); order_actions(fence, action, pe_order_implies_then); } } static bool should_ignore_failure_timeout(pe_resource_t *rsc, xmlNode *xml_op, const char *task, guint interval_ms, bool is_last_failure, pe_working_set_t *data_set) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if (rsc->remote_reconnect_ms && is_set(data_set->flags, pe_flag_stonith_enabled) && (interval_ms != 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); if (remote_node && !remote_node->details->remote_was_fenced) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
* * \param[in] rsc Resource that operation happened to * \param[in] node Node that operation happened on * \param[in] rc Actual result of operation * \param[in] xml_op Operation history entry XML * \param[in] data_set Current working set * * \return TRUE if operation history entry is expired, FALSE otherwise */ static bool check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc, xmlNode *xml_op, pe_working_set_t *data_set) { bool expired = FALSE; bool is_last_failure = pcmk__ends_with(ID(xml_op), "_last_failure_0"); time_t last_run = 0; guint interval_ms = 0; int unexpired_fail_count = 0; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *clear_reason = NULL; crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((rsc->failure_timeout > 0) && (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &last_run) == 0)) { // Resource has a failure-timeout, and history entry has a timestamp time_t now = get_effective_time(data_set); time_t last_failure = 0; // Is this particular operation history older than the failure timeout? if ((now >= (last_run + rsc->failure_timeout)) && !should_ignore_failure_timeout(rsc, xml_op, task, interval_ms, is_last_failure, data_set)) { expired = TRUE; } // Does the resource as a whole have an unexpired fail count? unexpired_fail_count = pe_get_failcount(node, rsc, &last_failure, pe_fc_effective, xml_op, data_set); // Update scheduler recheck time according to *last* failure crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds" " last-failure@%lld", ID(xml_op), (long long) last_run, (expired? "" : "not "), (long long) now, unexpired_fail_count, rsc->failure_timeout, (long long) last_failure); last_failure += rsc->failure_timeout + 1; if (unexpired_fail_count && (now < last_failure)) { pe__update_recheck_time(last_failure, data_set); } } if (expired) { if (pe_get_failcount(node, rsc, NULL, pe_fc_default, xml_op, data_set)) { // There is a fail count ignoring timeout if (unexpired_fail_count == 0) { // There is no fail count considering timeout clear_reason = "it expired"; } else { /* This operation is old, but there is an unexpired fail count. * In a properly functioning cluster, this should only be * possible if this operation is not a failure (otherwise the * fail count should be expired too), so this is really just a * failsafe. */ expired = FALSE; } } else if (is_last_failure && rsc->remote_reconnect_ms) { /* Clear any expired last failure when reconnect interval is set, * even if there is no fail count. */ clear_reason = "reconnect interval is set"; } } if (!expired && is_last_failure && should_clear_for_param_change(xml_op, task, rsc, node, data_set)) { clear_reason = "resource parameters have changed"; } if (clear_reason != NULL) { // Schedule clearing of the fail count pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason, data_set); if (is_set(data_set->flags, pe_flag_stonith_enabled) && rsc->remote_reconnect_ms) { /* If we're clearing a remote connection due to a reconnect * interval, we want to wait until any scheduled fencing * completes. * * We could limit this to remote_node->details->unclean, but at * this point, that's always true (it won't be reliable until * after unpack_node_loop() is done). 
*/ crm_info("Clearing %s failure will wait until any scheduled " "fencing of %s completes", task, rsc->id); order_after_remote_fencing(clear_op, rsc, data_set); } } if (expired && (interval_ms == 0) && safe_str_eq(task, CRMD_ACTION_STATUS)) { switch(rc) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: // Don't expire probes that return these values expired = FALSE; break; } } return expired; } int pe__target_rc_from_xml(xmlNode *xml_op) { int target_rc = 0; const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, NULL, NULL, NULL, &target_rc); return target_rc; } static enum action_fail_response get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) { int result = action_fail_recover; pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); result = action->on_fail; pe_free_action(action); return result; } static void update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, const char * task, int rc, xmlNode * last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) { gboolean clear_past_failure = FALSE; CRM_ASSERT(rsc); CRM_ASSERT(xml_op); if (rc == PCMK_OCF_NOT_RUNNING) { clear_past_failure = TRUE; } else if (rc == PCMK_OCF_NOT_INSTALLED) { rsc->role = RSC_ROLE_STOPPED; } else if (safe_str_eq(task, CRMD_ACTION_STATUS)) { if (last_failure) { const char *op_key = get_op_key(xml_op); const char *last_failure_key = get_op_key(last_failure); if (safe_str_eq(op_key, last_failure_key)) { clear_past_failure = TRUE; } } if (rsc->role < RSC_ROLE_STARTED) { set_active(rsc); } } else if (safe_str_eq(task, CRMD_ACTION_START)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_STOP)) { rsc->role = RSC_ROLE_STOPPED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { /* Demote from Master does not clear an error */ rsc->role = RSC_ROLE_SLAVE; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATED)) { rsc->role = RSC_ROLE_STARTED; clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_MIGRATE)) { unpack_migrate_to_success(rsc, node, xml_op, data_set); } else if (rsc->role < RSC_ROLE_STARTED) { pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname); set_active(rsc); } /* clear any previous failure actions */ if (clear_past_failure) { switch (*on_fail) { case action_fail_stop: case action_fail_fence: case action_fail_migrate: case action_fail_standby: pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop", rsc->id, fail2text(*on_fail)); break; case action_fail_block: case action_fail_ignore: case action_fail_recover: case action_fail_restart_container: *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; break; case action_fail_reset_remote: if (rsc->remote_reconnect_ms == 0) { /* With no reconnect interval, the connection is allowed to * start again after the remote node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } break; } } } /*! 
* \internal * \brief Remap informational monitor results to usual values * * Certain OCF result codes are for providing extended information to the * user about services that aren't yet failed but not entirely healthy either. * These must be treated as the "normal" result by pacemaker. * * \param[in] rc Actual result of a monitor action * \param[in] xml_op Operation history XML * \param[in] node Node that operation happened on * \param[in] rsc Resource that operation happened to * \param[in] data_set Cluster working set * * \return Result code that pacemaker should use * * \note If the result is remapped, and the node is not shutting down or failed, * the operation will be recorded in the data set's list of failed * operations, to highlight it for the user. */ static int remap_monitor_rc(int rc, xmlNode *xml_op, const pe_node_t *node, const pe_resource_t *rsc, pe_working_set_t *data_set) { int remapped_rc = rc; switch (rc) { case PCMK_OCF_DEGRADED: remapped_rc = PCMK_OCF_OK; break; case PCMK_OCF_DEGRADED_MASTER: remapped_rc = PCMK_OCF_RUNNING_MASTER; break; default: break; } if (rc != remapped_rc) { crm_trace("Remapping monitor result %d to %d", rc, remapped_rc); if (!node->details->shutdown || node->details->online) { record_failed_op(xml_op, node, rsc, data_set); } } return remapped_rc; } static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *on_fail, pe_working_set_t *data_set) { int rc = 0; int task_id = 0; int target_rc = 0; int status = PCMK_LRM_OP_UNKNOWN; guint interval_ms = 0; const char *task = NULL; const char *task_key = NULL; const char *exit_reason = NULL; bool expired = FALSE; pe_resource_t *parent = rsc; enum action_fail_response failure_strategy = action_fail_recover; CRM_CHECK(rsc && node && xml_op, return); target_rc = pe__target_rc_from_xml(xml_op); task_key = get_op_key(xml_op); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); if (exit_reason == NULL) { exit_reason = ""; } crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc); crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id); crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); CRM_CHECK(task != NULL, return); CRM_CHECK(status <= PCMK_LRM_OP_INVALID, return); CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return); if (!strcmp(task, CRMD_ACTION_NOTIFY) || !strcmp(task, CRMD_ACTION_METADATA)) { /* safe to ignore these */ return; } if (is_not_set(rsc->flags, pe_rsc_unique)) { parent = uber_parent(rsc); } pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)", task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role)); if (node->details->unclean) { pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean." " Further action depends on the value of the stop's on-fail attribute", node->details->uname, rsc->id); } /* It should be possible to call remap_monitor_rc() first then call * check_operation_expiry() only if rc != target_rc, because there should * never be a fail count without at least one unexpected result in the * resource history. That would be more efficient by avoiding having to call * check_operation_expiry() for expected results. * * However, we do have such configurations in the scheduler regression * tests, even if it shouldn't be possible with the current code. 
It's * probably a good idea anyway, but that would require updating the test * inputs to something currently possible. */ if ((status != PCMK_LRM_OP_NOT_INSTALLED) && check_operation_expiry(rsc, node, rc, xml_op, data_set)) { expired = TRUE; } if (!strcmp(task, CRMD_ACTION_STATUS)) { rc = remap_monitor_rc(rc, xml_op, node, rsc, data_set); } if (expired && (rc != target_rc)) { const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC); if (interval_ms == 0) { crm_notice("Ignoring expired %s failure on %s " CRM_XS " actual=%d expected=%d magic=%s", task_key, node->details->uname, rc, target_rc, magic); goto done; } else if(node->details->online && node->details->unclean == FALSE) { /* Reschedule the recurring monitor. CancelXmlOp() won't work at * this stage, so as a hacky workaround, forcibly change the restart * digest so check_action_definition() does what we want later. * * @TODO We should skip this if there is a newer successful monitor. * Also, this causes rescheduling only if the history entry * has an op-digest (which the expire-non-blocked-failure * scheduler regression test doesn't, but that may not be a * realistic scenario in production). */ crm_notice("Rescheduling %s after failure expired on %s " CRM_XS " actual=%d expected=%d magic=%s", task_key, node->details->uname, rc, target_rc, magic); crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST, "calculated-failure-timeout"); goto done; } } /* If the executor reported an operation status of anything but done or * error, consider that final. But for done or error, we know better whether * it should be treated as a failure or not, because we know the expected * result. */ if(status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) { status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail, data_set); pe_rsc_trace(rsc, "Remapped %s status to %d", task_key, status); } switch (status) { case PCMK_LRM_OP_CANCELLED: // Should never happen pe_err("Resource history contains cancellation '%s' " "(%s of %s on %s at %s)", ID(xml_op), task, rsc->id, node->details->uname, last_change_str(xml_op)); break; case PCMK_LRM_OP_PENDING: if (!strcmp(task, CRMD_ACTION_START)) { set_bit(rsc->flags, pe_rsc_start_pending); set_active(rsc); } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if (!strcmp(task, CRMD_ACTION_MIGRATE) && node->details->unclean) { /* If a pending migrate_to action is out on a unclean node, * we have to force the stop action on the target. */ const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET); pe_node_t *target = pe_find_node(data_set->nodes, migrate_target); if (target) { stop_action(rsc, target, FALSE); } } if (rsc->pending_task == NULL) { if ((interval_ms != 0) || strcmp(task, CRMD_ACTION_STATUS)) { rsc->pending_task = strdup(task); rsc->pending_node = node; } else { /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, enable the below and the corresponding part of * native.c:native_pending_task(). 
*/ #if 0 rsc->pending_task = strdup("probe"); rsc->pending_node = node; #endif } } break; case PCMK_LRM_OP_DONE: pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s", task, rsc->id, node->details->uname, last_change_str(xml_op), ID(xml_op)); update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_INSTALLED: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if (failure_strategy == action_fail_ignore) { crm_warn("Cannot ignore failed %s of %s on %s: " "Resource agent doesn't exist " CRM_XS " status=%d rc=%d id=%s", task, rsc->id, node->details->uname, status, rc, ID(xml_op)); /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */ *on_fail = action_fail_migrate; } resource_location(parent, node, -INFINITY, "hard-error", data_set); unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); break; case PCMK_LRM_OP_NOT_CONNECTED: if (pe__is_guest_or_remote_node(node) && is_set(node->details->remote_rsc->flags, pe_rsc_managed)) { /* We should never get into a situation where a managed remote * connection resource is considered OK but a resource action * behind the connection gets a "not connected" status. But as a * fail-safe in case a bug or unusual circumstances do lead to * that, ensure the remote connection is considered failed. */ set_bit(node->details->remote_rsc->flags, pe_rsc_failed); } // fall through case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_ERROR_HARD: case PCMK_LRM_OP_ERROR_FATAL: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_INVALID: failure_strategy = get_action_on_fail(rsc, task_key, task, data_set); if ((failure_strategy == action_fail_ignore) || (failure_strategy == action_fail_restart_container && !strcmp(task, CRMD_ACTION_STOP))) { crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s " "succeeded " CRM_XS " rc=%d id=%s", task, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, rsc->id, node->details->uname, last_change_str(xml_op), rc, ID(xml_op)); update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); set_bit(rsc->flags, pe_rsc_failure_ignored); record_failed_op(xml_op, node, rsc, data_set); if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set); if(status == PCMK_LRM_OP_ERROR_HARD) { do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE, "Preventing %s from restarting on %s because " "of hard failure (%s%s%s)" CRM_XS " rc=%d id=%s", parent->id, node->details->uname, services_ocf_exitcode_str(rc), (*exit_reason? ": " : ""), exit_reason, rc, ID(xml_op)); resource_location(parent, node, -INFINITY, "hard-error", data_set); } else if(status == PCMK_LRM_OP_ERROR_FATAL) { crm_err("Preventing %s from restarting anywhere because " "of fatal failure (%s%s%s) " CRM_XS " rc=%d id=%s", parent->id, services_ocf_exitcode_str(rc), (*exit_reason? 
": " : ""), exit_reason, rc, ID(xml_op)); resource_location(parent, NULL, -INFINITY, "fatal-error", data_set); } } break; } done: pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s", rsc->id, task, role2text(rsc->role), role2text(rsc->next_role)); } static void add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, pe_working_set_t *data_set) { const char *cluster_name = NULL; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID), strdup(node->details->id)); if (safe_str_eq(node->details->id, data_set->dc_uuid)) { data_set->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE)); } cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name"); if (cluster_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME), strdup(cluster_name)); } pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, NULL, node->details->attrs, NULL, overwrite, data_set); if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { const char *site_name = pe_node_attribute_raw(node, "site-name"); if (site_name) { g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(site_name)); } else if (cluster_name) { /* Default to cluster-name if unset */ g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_SITE_NAME), strdup(cluster_name)); } } } static GListPtr extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GListPtr gIter = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) { crm_trace("Skipping %s: not active", ID(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GListPtr find_operations(const char *rsc, const char *node, gboolean active_filter, pe_working_set_t * data_set) { GListPtr output = NULL; GListPtr intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE); pe_node_t *this_node = NULL; xmlNode *node_state = NULL; for (node_state = __xml_first_child_element(status); node_state != NULL; node_state = __xml_next_element(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { const char *uname = 
crm_element_value(node_state, XML_ATTR_UNAME); if (node != NULL && safe_str_neq(uname, node)) { continue; } this_node = pe_find_node(data_set->nodes, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (pe__is_guest_or_remote_node(this_node)) { determine_remote_online_status(data_set, this_node); } else { determine_online_status(node_state, this_node, data_set); } if (this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE); for (lrm_rsc = __xml_first_child_element(tmp); lrm_rsc != NULL; lrm_rsc = __xml_next_element(lrm_rsc)) { if (crm_str_eq((const char *)lrm_rsc->name, XML_LRM_TAG_RESOURCE, TRUE)) { const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID); if (rsc != NULL && safe_str_neq(rsc_id, rsc)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } } } return output; } diff --git a/lib/pengine/unpack.h b/lib/pengine/unpack.h index 42117fcfbe..6f7226a05e 100644 --- a/lib/pengine/unpack.h +++ b/lib/pengine/unpack.h @@ -1,58 +1,55 @@ /* - * Copyright 2004-2019 the Pacemaker project contributors + * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PENGINE_UNPACK__H # define PENGINE_UNPACK__H extern gboolean unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set); extern gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set); extern gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set); extern gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set); extern gboolean unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set); extern gboolean unpack_status(xmlNode * status, pe_working_set_t * data_set); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); extern gboolean unpack_lrm_resources(pe_node_t * node, xmlNode * lrm_state, pe_working_set_t * data_set); -extern gboolean determine_online_status(xmlNode * node_state, pe_node_t * this_node, - pe_working_set_t * data_set); - // Some warnings we don't want to print every transition enum pe_warn_once_e { pe_wo_blind = 0x0001, pe_wo_restart_type = 0x0002, pe_wo_role_after = 0x0004, pe_wo_poweroff = 0x0008, pe_wo_require_all = 0x0010, pe_wo_order_score = 0x0020, pe_wo_neg_threshold = 0x0040, }; extern uint32_t pe_wo; #define pe_warn_once(pe_wo_bit, fmt...) do { \ if (is_not_set(pe_wo, pe_wo_bit)) { \ if (pe_wo_bit == pe_wo_blind) { \ crm_warn(fmt); \ } else { \ pe_warn(fmt); \ } \ set_bit(pe_wo, pe_wo_bit); \ } \ } while (0); #endif diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index 9a8c2f0ac7..dbc73b8d4f 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,2766 +1,2766 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, pe_working_set_t * data_set); static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled); #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t * pe_rsc_action_details(pe_action_t *action) { pe_rsc_action_details_t *details; CRM_CHECK(action != NULL, return NULL); if (action->action_details == NULL) { action->action_details = calloc(1, sizeof(pe_rsc_action_details_t)); CRM_CHECK(action->action_details != NULL, return NULL); } details = (pe_rsc_action_details_t *) action->action_details; if (details->versioned_parameters == NULL) { details->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS); } if (details->versioned_meta == NULL) { details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); } return details; } static void pe_free_rsc_action_details(pe_action_t *action) { pe_rsc_action_details_t *details; if ((action == NULL) || (action->action_details == NULL)) { return; } details = (pe_rsc_action_details_t *) action->action_details; if (details->versioned_parameters) { free_xml(details->versioned_parameters); } if (details->versioned_meta) { free_xml(details->versioned_meta); } action->action_details = NULL; } #endif /*! * \internal * \brief Check whether we can fence a particular node * * \param[in] data_set Working set for cluster * \param[in] node Name of node to check * * \return true if node can be fenced, false otherwise */ bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node) { if (pe__is_guest_node(node)) { /* Guest nodes are fenced by stopping their container resource. We can * do that if the container's host is either online or fenceable. 
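         * In other words, the guest can be fenced only if every node its
         * container is currently running on is either online or can itself
         * be fenced.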
*/ pe_resource_t *rsc = node->details->remote_rsc->container; for (GList *n = rsc->running_on; n != NULL; n = n->next) { pe_node_t *container_node = n->data; if (!container_node->details->online && !pe_can_fence(data_set, container_node)) { return false; } } return true; } else if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) { return false; /* Turned off */ } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) { return false; /* No devices */ } else if (is_set(data_set->flags, pe_flag_have_quorum)) { return true; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return true; } else if(node == NULL) { return false; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return true; } crm_trace("Cannot fence %s", node->details->uname); return false; } pe_node_t * node_copy(const pe_node_t *this_node) { pe_node_t *new_node = NULL; CRM_CHECK(this_node != NULL, return NULL); new_node = calloc(1, sizeof(pe_node_t)); CRM_ASSERT(new_node != NULL); crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; pe_node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = merge_weights(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { pe_node_t *new_node = node_copy(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } GHashTable * node_hash_from_list(GListPtr list) { GListPtr gIter = list; GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free); for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pe_node_t *n = node_copy(node); g_hash_table_insert(result, (gpointer) n->details->id, n); } return result; } GListPtr node_list_dup(GListPtr list1, gboolean reset, gboolean filter) { GListPtr result = NULL; GListPtr gIter = list1; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *new_node = NULL; pe_node_t *this_node = (pe_node_t *) gIter->data; if (filter && this_node->weight < 0) { continue; } new_node = node_copy(this_node); if (reset) { new_node->weight = 0; } if (new_node != NULL) { result = g_list_prepend(result, new_node); } } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { const char *name_a = ((const pe_node_t *) a)->details->uname; const char *name_b = ((const pe_node_t *) b)->details->uname; while (*name_a && *name_b) { if (isdigit(*name_a) && isdigit(*name_b)) { // If node names contain a number, sort numerically char *end_a = NULL; char *end_b = NULL; long num_a = strtol(name_a, &end_a, 10); long num_b = strtol(name_b, &end_b, 10); // allow ordering e.g. 
007 > 7 size_t len_a = end_a - name_a; size_t len_b = end_b - name_b; if (num_a < num_b) { return -1; } else if (num_a > num_b) { return 1; } else if (len_a < len_b) { return -1; } else if (len_a > len_b) { return 1; } name_a = end_a; name_b = end_b; } else { // Compare non-digits case-insensitively int lower_a = tolower(*name_a); int lower_b = tolower(*name_b); if (lower_a < lower_b) { return -1; } else if (lower_a > lower_b) { return 1; } ++name_a; ++name_b; } } if (!*name_a && *name_b) { return -1; } else if (*name_a && !*name_b) { return 1; } return 0; } /*! * \internal * \brief Output node weights to stdout * * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ static void pe__output_node_weights(pe_resource_t *rsc, const char *comment, GHashTable *nodes) { char score[128]; // Stack-allocated since this is called frequently // Sort the nodes so the output is consistent for regression tests GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname); for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; score2char_stack(node->weight, score, sizeof(score)); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } /*! * \internal * \brief Log node weights at trace level * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ static void pe__log_node_weights(const char *file, const char *function, int line, pe_resource_t *rsc, const char *comment, GHashTable *nodes) { GHashTableIter iter; pe_node_t *node = NULL; char score[128]; // Stack-allocated since this is called frequently // Don't waste time if we're not tracing at this point pcmk__log_else(LOG_TRACE, return); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { score2char_stack(node->weight, score, sizeof(score)); if (rsc) { qb_log_from_external_source(function, file, "%s: %s allocation score on %s: %s", LOG_TRACE, line, 0, comment, rsc->id, node->details->uname, score); } else { qb_log_from_external_source(function, file, "%s: %s = %s", LOG_TRACE, line, 0, comment, node->details->uname, score); } } } /*! 
* \internal * \brief Log or output node weights * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] to_log Log if true, otherwise output * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ void pe__show_node_weights_as(const char *file, const char *function, int line, bool to_log, pe_resource_t *rsc, const char *comment, GHashTable *nodes) { if (rsc != NULL) { if (is_set(rsc->flags, pe_rsc_orphan)) { // Don't show allocation scores for orphans return; } nodes = rsc->allowed_nodes; } if (nodes == NULL) { // Nothing to show return; } if (to_log) { pe__log_node_weights(file, function, line, rsc, comment, nodes); } else { pe__output_node_weights(rsc, comment, nodes); } // If this resource has children, repeat recursively for each if (rsc && rsc->children) { for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe__show_node_weights_as(file, function, line, to_log, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; char *new_text = crm_strdup_printf("%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, pe_node_t * node) { char *dump_text = crm_strdup_printf("%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == LOG_STDOUT) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node) { char *dump_text = crm_strdup_printf("%s: %s utilization on %s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); switch (level) { case LOG_STDOUT: fprintf(stdout, "%s\n", dump_text); break; case LOG_NEVER: break; default: crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const pe_resource_t *resource1 = (const pe_resource_t *)a; const pe_resource_t *resource2 = (const pe_resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint sort_rsc_priority(gconstpointer a, gconstpointer b) { const pe_resource_t *resource1 = (const pe_resource_t *)a; const pe_resource_t *resource2 = (const pe_resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } pe_action_t * custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { pe_action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); 
#else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { if (pcmk__list_of_multiple(possible_matches)) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found existing action %d (%s) for %s (%s) on %s", action->id, action->uuid, (rsc? rsc->id : "no resource"), task, (on_node? on_node->details->uname : "no node")); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating %s action %d: %s for %s (%s) on %s", - (optional? "optional" : " mandatory"), + (optional? "optional" : "mandatory"), data_set->action_id, key, (rsc? rsc->id : "no resource"), task, (on_node? on_node->details->uname : "no node")); } action = calloc(1, sizeof(pe_action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; CRM_ASSERT(task != NULL); action->task = strdup(task); if (on_node) { action->node = node_copy(on_node); } action->uuid = strdup(key); if (safe_str_eq(task, CRM_OP_LRM_DELETE)) { // Resource history deletion for a node can be done on the DC pe_set_action_bit(action, pe_action_dc); } pe_set_action_bit(action, pe_action_runnable); if (optional) { pe_set_action_bit(action, pe_action_optional); } else { pe_clear_action_bit(action, pe_action_optional); } action->extra = crm_str_table_new(); action->meta = crm_str_table_new(); if (save_action) { data_set->actions = g_list_prepend(data_set->actions, action); if(rsc == NULL) { g_hash_table_insert(data_set->singletons, action->uuid, action); } } if (rsc != NULL) { action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); unpack_operation(action, action->op_entry, rsc->container, data_set); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); } } if (save_action) { pe_rsc_trace(rsc, "Action %d created", action->id); } } if (!optional && is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Unset optional on action %d", action->id); pe_clear_action_bit(action, pe_action_optional); } if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); int warn_level = LOG_TRACE; if (save_action) { warn_level = LOG_WARNING; } if (is_set(action->flags, pe_action_have_node_attrs) == FALSE && action->node != NULL && action->op_entry != NULL) { pe_set_action_bit(action, pe_action_have_node_attrs); pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, action->node->details->attrs, action->extra, NULL, FALSE, data_set); } if (is_set(action->flags, pe_action_pseudo)) { /* leave untouched */ } else if (action->node == NULL) { pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid); pe_clear_action_bit(action, pe_action_runnable); } else if (is_not_set(rsc->flags, pe_rsc_managed) && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS) == NULL) { crm_debug("Action %s (unmanaged)", action->uuid); pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); /* action->runnable = FALSE; */ } else if (is_not_set(action->flags, pe_action_dc) && !(action->node->details->online) && (!pe__is_guest_node(action->node) || action->node->details->remote_requires_reset)) { pe_clear_action_bit(action, 
pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)", action->uuid, action->node->details->uname); if (is_set(action->rsc->flags, pe_rsc_managed) && save_action && a_task == stop_rsc && action->node->details->unclean == FALSE) { pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE); } } else if (is_not_set(action->flags, pe_action_dc) && action->node->details->pending) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid); pe_action_set_reason(action, NULL, TRUE); if (pe__is_guest_node(action->node) && !pe_can_fence(data_set, action->node)) { /* An action that requires nothing usually does not require any * fencing in order to be runnable. However, there is an * exception: an action cannot be completed if it is on a guest * node whose host is unclean and cannot be fenced. */ pe_clear_action_bit(action, pe_action_runnable); crm_debug("%s\t%s (cancelled : host cannot be fenced)", action->node->details->uname, action->uuid); } else { pe_set_action_bit(action, pe_action_runnable); } #if 0 /* * No point checking this * - if we don't have quorum we can't stonith anyway */ } else if (action->needs == rsc_req_stonith) { crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_stop) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)", action->node->details->uname, action->uuid); } } else if(is_not_set(action->flags, pe_action_runnable)) { pe_rsc_trace(rsc, "Action %s is runnable", action->uuid); //pe_action_set_reason(action, NULL, TRUE); pe_set_action_bit(action, pe_action_runnable); } if (save_action) { switch (a_task) { case stop_rsc: set_bit(rsc->flags, pe_rsc_stopping); break; case start_rsc: clear_bit(rsc->flags, pe_rsc_starting); if (is_set(action->flags, pe_action_runnable)) { set_bit(rsc->flags, pe_rsc_starting); } break; default: break; } } } free(key); return action; } static const char * unpack_operation_on_fail(pe_action_t * action) { const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " "action to default value because 'standby' is not " "allowed for stop", action->rsc->id); return NULL; } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; const char *name = NULL; const char *role = NULL; const char *on_fail = NULL; const char *interval_spec = NULL; const char 
*enabled = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = __xml_first_child_element(action->rsc->ops_xml); operation && !value; operation = __xml_next_element(operation)) { if (!crm_str_eq((const char *)operation->name, "op", TRUE)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); enabled = crm_element_value(operation, "enabled"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (enabled && !crm_is_true(enabled)) { continue; } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) { continue; } else if (crm_parse_interval_spec(interval_spec) == 0) { continue; } value = on_fail; } } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) { value = "ignore"; } return value; } static xmlNode * find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled) { guint interval_ms = 0; guint min_interval_ms = G_MAXUINT; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } if (safe_str_neq(name, RSC_STATUS)) { continue; } interval_ms = crm_parse_interval_spec(interval_spec); if (interval_ms && (interval_ms < min_interval_ms)) { min_interval_ms = interval_ms; op = operation; } } } return op; } static int unpack_start_delay(const char *value, GHashTable *meta) { int start_delay = 0; if (value != NULL) { start_delay = crm_get_msec(value); if (start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } // true if value contains valid, non-NULL interval origin for recurring op static bool unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms, crm_time_t *now, long long *start_delay) { long long result = 0; guint interval_sec = interval_ms / 1000; crm_time_t *origin = NULL; // Ignore unspecified values and non-recurring operations if ((value == NULL) || (interval_ms == 0) || (now == NULL)) { return false; } // Parse interval origin from text origin = crm_time_new(value); if (origin == NULL) { pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation " "'%s' because '%s' is not valid", (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value); return false; } // Get seconds since origin (negative if origin is in the future) result = crm_time_get_seconds(now) - crm_time_get_seconds(origin); crm_time_free(origin); // Calculate seconds from closest interval to now result = result % interval_sec; // Calculate seconds remaining until next interval result = ((result <= 0)? 0 : interval_sec) - result; crm_info("Calculated a start delay of %llds for operation '%s'", result, (ID(xml_obj)? 
ID(xml_obj) : "(unspecified)")); if (start_delay != NULL) { *start_delay = result * 1000; // milliseconds } return true; } static int unpack_timeout(const char *value) { int timeout = crm_get_msec(value); if (timeout < 0) { timeout = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout; } int pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set) { xmlNode *child = NULL; const char *timeout = NULL; int timeout_ms = 0; for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) { timeout = crm_element_value(child, XML_ATTR_TIMEOUT); break; } } if (timeout == NULL && data_set->op_defaults) { GHashTable *action_meta = crm_str_table_new(); pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, action_meta, NULL, FALSE, data_set); timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); } // @TODO check meta-attributes (including versioned meta-attributes) // @TODO maybe use min-interval monitor timeout as default for monitors timeout_ms = crm_get_msec(timeout); if (timeout_ms < 0) { timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } return timeout_ms; } #if ENABLE_VERSIONED_ATTRS static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, guint interval_ms, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = __xml_first_child_element(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) { for (attr = __xml_first_child_element(attrs); attr != NULL; attr = __xml_next_element(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) { long long start_delay = 0; if (unpack_interval_origin(value, xml_obj, interval_ms, now, &start_delay)) { crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) { int timeout = unpack_timeout(value); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout); } } } } #endif /*! * \brief Unpack operation XML into an action structure * * Unpack an operation's meta-attributes (normalizing the interval, timeout, * and start delay values as integer milliseconds), requirements, and * failure policy. 
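 *
 * For example (illustrative attribute values), an operation configured as
 *   <op id="db-monitor-30s" name="monitor" interval="30s" timeout="20s"/>
 * is unpacked with its interval normalized to 30000 (milliseconds) and its
 * timeout normalized to 20000 (milliseconds).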
* * \param[in,out] action Action to unpack into * \param[in] xml_obj Operation XML (or NULL if all defaults) * \param[in] container Resource that contains affected resource, if any * \param[in] data_set Cluster state */ void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, pe_working_set_t * data_set) { guint interval_ms = 0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; const char *field = NULL; char *default_timeout = NULL; #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *rsc_details = NULL; #endif CRM_CHECK(action && action->rsc, return); // Cluster-wide pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set); // Probe timeouts default differently, so handle timeout default later default_timeout = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); if (default_timeout) { default_timeout = strdup(default_timeout); g_hash_table_remove(action->meta, XML_ATTR_TIMEOUT); } if (xml_obj) { xmlAttrPtr xIter = NULL; // take precedence over defaults pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, TRUE, data_set); #if ENABLE_VERSIONED_ATTRS rsc_details = pe_rsc_action_details(action); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, rsc_details->versioned_parameters, data_set->now, NULL); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, rsc_details->versioned_meta, data_set->now, NULL); #endif /* Anything set as an XML property has highest precedence. * This ensures we use the name and interval from the tag. */ for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } g_hash_table_remove(action->meta, "id"); // Normalize interval to milliseconds field = XML_LRM_ATTR_INTERVAL; value = g_hash_table_lookup(action->meta, field); if (value != NULL) { interval_ms = crm_parse_interval_spec(value); } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) { /* An orphaned recurring monitor will not have any XML. However, we * want the interval to be set, so the action can be properly detected * as a recurring monitor. Parse it from the key in this case. 
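         * For example (illustrative resource name), an orphaned recurring
         * monitor whose key is "myrsc_monitor_10000" ends up with an interval
         * of 10000ms parsed from that key.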
*/ parse_op_key(action->uuid, NULL, NULL, &interval_ms); } if (interval_ms > 0) { value_ms = crm_strdup_printf("%u", interval_ms); g_hash_table_replace(action->meta, strdup(field), value_ms); } else if (value) { g_hash_table_remove(action->meta, field); } // Handle timeout default, now that we know the interval if (g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT)) { free(default_timeout); } else { // Probe timeouts default to minimum-interval monitor's if (safe_str_eq(action->task, RSC_STATUS) && (interval_ms == 0)) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); if (value) { crm_trace("\t%s defaults to minimum-interval monitor's timeout '%s'", action->uuid, value); free(default_timeout); default_timeout = strdup(value); } } } if (default_timeout) { g_hash_table_insert(action->meta, strdup(XML_ATTR_TIMEOUT), default_timeout); } } if (safe_str_neq(action->task, RSC_START) && safe_str_neq(action->task, RSC_PROMOTE)) { action->needs = rsc_req_nothing; value = "nothing (not start/promote)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing (resource)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum (resource)"; } else { action->needs = rsc_req_nothing; value = "nothing (resource)"; } pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (safe_str_eq(value, "block")) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); value = "block"; // The above could destroy the original string } else if (safe_str_eq(value, "fence")) { action->on_fail = action_fail_fence; value = "node fencing"; if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for " "operation '%s' to 'stop' because 'fence' is not " "valid when fencing is disabled", action->uuid); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (safe_str_eq(value, "standby")) { action->on_fail = action_fail_standby; value = "node standby"; } else if (safe_str_eq(value, "ignore") || safe_str_eq(value, "nothing")) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (safe_str_eq(value, "migrate")) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (safe_str_eq(value, "stop")) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (safe_str_eq(value, "restart")) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (safe_str_eq(value, "restart-container")) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* For remote nodes, ensure that any failure that results in dropping an * active connection to the node results in fencing of the node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. 
* 2. start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) && (pe__resource_is_remote_conn(action->rsc, data_set) && !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && (interval_ms == 0)) && (safe_str_neq(action->task, CRMD_ACTION_START)))) { if (!is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged remote node (enforcing default)"; } else { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence remote node (default)"; } else { value = "recover remote node connection (default)"; } if (action->rsc->remote_reconnect_ms) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); if (value) { pe_warn_once(pe_wo_role_after, "Support for role_after_failure is deprecated and will be removed in a future release"); } } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task, role2text(action->fail_role)); value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { long long start_delay = 0; value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now, &start_delay)) { g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY), crm_strdup_printf("%lld", start_delay)); } } value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); timeout = unpack_timeout(value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout)); #if ENABLE_VERSIONED_ATTRS unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, data_set->now); #endif } static xmlNode * find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled) { guint interval_ms = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval_spec = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } interval_ms = 
crm_parse_interval_spec(interval_spec); match_key = pcmk__op_key(rsc->id, name, interval_ms); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); if (rsc->clone_name) { match_key = pcmk__op_key(rsc->clone_name, name, interval_ms); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = pcmk__op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = pcmk__op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(pe_resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, pe_node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { int log_level = LOG_TRACE; char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending, &log_level); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? 
"" : ": ", (char *)key, (char *)value); } void pe_free_action(pe_action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } #if ENABLE_VERSIONED_ATTRS if (action->rsc) { pe_free_rsc_action_details(action); } #endif free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, pe_node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); if (value == NULL) { /* skip */ } else if (safe_str_eq(value, "0")) { /* skip */ } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; break; default: break; } } return task; } pe_action_t * find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (uuid != NULL && safe_str_neq(uuid, action->uuid)) { continue; } else if (task != NULL && safe_str_neq(task, action->task)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (safe_str_neq(key, action->uuid)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = node_copy(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, action->node->details->uname, on_node->details->uname); } } return result; } GList * 
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
    GList *result = NULL;

    CRM_CHECK(key != NULL, return NULL);

    if (on_node == NULL) {
        crm_trace("Not searching for action %s because node not specified",
                  key);
        return NULL;
    }

    for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        if (action->node == NULL) {
            crm_trace("Skipping comparison of %s vs action %s without node",
                      key, action->uuid);

        } else if (safe_str_neq(key, action->uuid)) {
            crm_trace("Desired action %s doesn't match %s", key, action->uuid);

        } else if (safe_str_neq(on_node->details->id,
                                action->node->details->id)) {
            crm_trace("Action %s desired node ID %s doesn't match %s",
                      key, on_node->details->id, action->node->details->id);

        } else {
            crm_trace("Action %s matches", key);
            result = g_list_prepend(result, action);
        }
    }

    return result;
}

/*!
 * \brief Find all actions of given type for a resource
 *
 * \param[in] rsc           Resource to search
 * \param[in] node          Find only actions scheduled on this node
 * \param[in] task          Action name to search for
 * \param[in] require_node  If TRUE, NULL node or action node will not match
 *
 * \return List of actions found (or NULL if none)
 * \note If node is not NULL and require_node is FALSE, matching actions
 *       without a node will be assigned to node.
 */
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                     const char *task, bool require_node)
{
    GList *result = NULL;
    char *key = pcmk__op_key(rsc->id, task, 0);

    if (require_node) {
        result = find_actions_exact(rsc->actions, key, node);
    } else {
        result = find_actions(rsc->actions, key, node);
    }
    free(key);
    return result;
}

static void
resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
{
    pe_node_t *match = NULL;

    if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
        && safe_str_eq(tag, "symmetric_default")) {
        /* This string comparison may be fragile, but exclusive resources and
         * exclusive nodes should not have the symmetric_default constraint
         * applied to them.
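         * For example, a resource whose location constraints use
         * resource-discovery=exclusive should be scored only by those explicit
         * constraints, so the implicit symmetric-cluster default score is
         * skipped here.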
*/ return; } else if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = merge_weights(match->weight, score); } void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GListPtr gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node_iter = (pe_node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; pe_node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) gint sort_op_by_callid(gconstpointer a, gconstpointer b) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const xmlNode *xml_a = a; const xmlNode *xml_b = b; const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID); const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID); if (safe_str_eq(a_xml_id, b_xml_id)) { /* We have duplicate lrm_rsc_op entries in the status * section which is unlikely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. 
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ time_t last_a = -1; time_t last_b = -1; crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %lld vs %lld", (long long) last_a, (long long) last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic a"); } if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (e.g. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (value == NULL || safe_str_eq("started", value) || safe_str_eq("default", value)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " "because '%s' is not valid", rsc->id, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " "because '%s' only makes sense for promotable " "clones", rsc->id, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; pe_action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */ CRM_ASSERT(lh_action != rh_action); /* Filter dups, otherwise update_action_states() has too much work to do */ gIter = lh_action->actions_after; for (; gIter != NULL; gIter = gIter->next) { pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data; if (after->action == rh_action && (after->type & order)) { return FALSE; } } wrapper = calloc(1, sizeof(pe_action_wrapper_t)); wrapper->action = rh_action; wrapper->type = order; list = lh_action->actions_after; list = g_list_prepend(list, wrapper); lh_action->actions_after = list; wrapper = NULL; /* order |= pe_order_implies_then; */ /* order ^= pe_order_implies_then; */ wrapper = calloc(1, sizeof(pe_action_wrapper_t)); wrapper->action = lh_action; wrapper->type = order; list = rh_action->actions_before; list = g_list_prepend(list, wrapper); rh_action->actions_before = list; return TRUE; } pe_action_t * get_pseudo_op(const char *name, pe_working_set_t * data_set) { pe_action_t *op = NULL; if(data_set->singletons) { op = g_hash_table_lookup(data_set->singletons, name); } if (op == NULL) { op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_pseudo); set_bit(op->flags, pe_action_runnable); } return op; } void destroy_ticket(gpointer data) { pe_ticket_t *ticket = data; if (ticket->state) { g_hash_table_destroy(ticket->state); } free(ticket->id); free(ticket); } pe_ticket_t * ticket_new(const char *ticket_id, pe_working_set_t * data_set) { pe_ticket_t *ticket = NULL; if (ticket_id == NULL || strlen(ticket_id) == 0) { return NULL; } if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, free, destroy_ticket); } ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = calloc(1, sizeof(pe_ticket_t)); if (ticket == NULL) { crm_err("Cannot allocate ticket '%s'", ticket_id); return NULL; } crm_trace("Creaing ticket entry for %s", ticket_id); ticket->id = strdup(ticket_id); ticket->granted = FALSE; ticket->last_granted = -1; ticket->standby = FALSE; ticket->state = crm_str_table_new(); g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket); } return ticket; } static void filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) { if (param_set && param_string) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; char *name = crm_strdup_printf(" %s ", prop_name); char *match = strstr(param_string, name); free(name); // Do now, because current entry might get removed below xIter = xIter->next; if (need_present && match == NULL) { crm_trace("%s not found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } else if (need_present == FALSE && match) { crm_trace("%s found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } } } } #if ENABLE_VERSIONED_ATTRS static void append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params) { GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version); char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { crm_xml_add(params, key, value); } g_hash_table_destroy(hash); } #endif /*! 
* \internal * \brief Calculate action digests and store in node's digest cache * * \param[in] rsc Resource that action was for * \param[in] task Name of action performed * \param[in] key Action's task key * \param[in] node Node action was performed on * \param[in] xml_op XML of operation in CIB status (if available) * \param[in] calc_secure Whether to calculate secure digest * \param[in] data_set Cluster working set * * \return Pointer to node's digest cache entry */ static op_digest_cache_t * rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key, pe_node_t *node, xmlNode *xml_op, bool calc_secure, pe_working_set_t *data_set) { op_digest_cache_t *data = NULL; data = g_hash_table_lookup(node->details->digest_cache, key); if (data == NULL) { GHashTable *local_rsc_params = crm_str_table_new(); pe_action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set); #if ENABLE_VERSIONED_ATTRS xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); const char *ra_version = NULL; #endif const char *op_version; const char *restart_list = NULL; const char *secure_list = " passwd password "; data = calloc(1, sizeof(op_digest_cache_t)); CRM_ASSERT(data != NULL); get_rsc_attributes(local_rsc_params, rsc, node, data_set); #if ENABLE_VERSIONED_ATTRS pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); #endif data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside if (pe__add_bundle_remote_name(rsc, data->params_all, XML_RSC_ATTR_REMOTE_RA_ADDR)) { crm_trace("Set address for bundle connection %s (on %s)", rsc->id, node->details->uname); } g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); g_hash_table_foreach(action->extra, hash2field, data->params_all); g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); g_hash_table_foreach(action->meta, hash2metafield, data->params_all); if(xml_op) { secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); #if ENABLE_VERSIONED_ATTRS ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); #endif } else { op_version = CRM_FEATURE_SET; } #if ENABLE_VERSIONED_ATTRS append_versioned_params(local_versioned_params, ra_version, data->params_all); append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all); { pe_rsc_action_details_t *details = pe_rsc_action_details(action); append_versioned_params(details->versioned_parameters, ra_version, data->params_all); } #endif pcmk__filter_op_for_digest(data->params_all); g_hash_table_destroy(local_rsc_params); pe_free_action(action); data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); if (calc_secure) { data->params_secure = copy_xml(data->params_all); if(secure_list) { filter_parameters(data->params_secure, secure_list, FALSE); } data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); } if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { data->params_restart = copy_xml(data->params_all); if (restart_list) { filter_parameters(data->params_restart, restart_list, TRUE); } data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version); } g_hash_table_insert(node->details->digest_cache, strdup(key), data); } return data; } op_digest_cache_t * 
rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; char *key = NULL; guint interval_ms = 0; const char *op_version; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_all; const char *digest_restart; CRM_ASSERT(node != NULL); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); key = pcmk__op_key(rsc->id, task, interval_ms); data = rsc_action_digest(rsc, task, key, node, xml_op, is_set(data_set->flags, pe_flag_sanitized), data_set); data->rc = RSC_DIGEST_MATCH; if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) { pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s", key, node->details->uname, crm_str(digest_restart), data->digest_restart_calc, op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); data->rc = RSC_DIGEST_RESTART; } else if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strcmp(digest_all, data->digest_all_calc) != 0) { pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s", key, node->details->uname, crm_str(digest_all), data->digest_all_calc, (interval_ms > 0)? "reschedule" : "reload", op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); data->rc = RSC_DIGEST_ALL; } free(key); return data; } /*! * \internal * \brief Create an unfencing summary for use in special node attribute * * Create a string combining a fence device's resource ID, agent type, and * parameter digest (whether for all parameters or just non-private parameters). * This can be stored in a special node attribute, allowing us to detect changes * in either the agent type or parameters, to know whether unfencing must be * redone or can be safely skipped when the device's history is cleaned. * * \param[in] rsc_id Fence device resource ID * \param[in] agent_type Fence device agent * \param[in] param_digest Fence device parameter digest * * \return Newly allocated string with unfencing digest * \note The caller is responsible for freeing the result. */ static inline char * create_unfencing_summary(const char *rsc_id, const char *agent_type, const char *param_digest) { return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest); } /*! * \internal * \brief Check whether a node can skip unfencing * * Check whether a fence device's current definition matches a node's * stored summary of when it was last unfenced by the device. 
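 *
 * For example (illustrative IDs and digests), if the node's stored summary is
 * "fence-ipmi:fence_ipmilan:a1b2c3,fence-apc:fence_apc:d4e5f6" and the current
 * summary calculated for fence-ipmi is "fence-ipmi:fence_ipmilan:a1b2c3", the
 * strstr() search succeeds and unfencing by that device can be skipped.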
* * \param[in] rsc_id Fence device's resource ID * \param[in] agent Fence device's agent type * \param[in] digest_calc Fence device's current parameter digest * \param[in] node_summary Value of node's special unfencing node attribute * (a comma-separated list of unfencing summaries for * all devices that have unfenced this node) * * \return TRUE if digest matches, FALSE otherwise */ static bool unfencing_digest_matches(const char *rsc_id, const char *agent, const char *digest_calc, const char *node_summary) { bool matches = FALSE; if (rsc_id && agent && digest_calc && node_summary) { char *search_secure = create_unfencing_summary(rsc_id, agent, digest_calc); /* The digest was calculated including the device ID and agent, * so there is no risk of collision using strstr(). */ matches = (strstr(node_summary, search_secure) != NULL); crm_trace("Calculated unfencing digest '%s' %sfound in '%s'", search_secure, matches? "" : "not ", node_summary); free(search_secure); } return matches; } /* Magic string to use as action name for digest cache entries used for * unfencing checks. This is not a real action name (i.e. "on"), so * check_action_definition() won't confuse these entries with real actions. */ #define STONITH_DIGEST_TASK "stonith-on" /*! * \internal * \brief Calculate fence device digests and digest comparison result * * \param[in] rsc Fence device resource * \param[in] agent Fence device's agent type * \param[in] node Node with digest cache to use * \param[in] data_set Cluster working set * * \return Node's digest cache entry */ static op_digest_cache_t * fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent, pe_node_t *node, pe_working_set_t *data_set) { const char *node_summary = NULL; // Calculate device's current parameter digests char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0); op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, TRUE, data_set); free(key); // Check whether node has special unfencing summary node attribute node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL); if (node_summary == NULL) { data->rc = RSC_DIGEST_UNKNOWN; return data; } // Check whether full parameter digest matches if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc, node_summary)) { data->rc = RSC_DIGEST_MATCH; return data; } // Check whether secure parameter digest matches node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE); if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc, node_summary)) { data->rc = RSC_DIGEST_MATCH; if (is_set(data_set->flags, pe_flag_stdout)) { printf("Only 'private' parameters to %s for unfencing %s changed\n", rsc->id, node->details->uname); } return data; } // Parameters don't match data->rc = RSC_DIGEST_ALL; if (is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout)) && data->digest_secure_calc) { char *digest = create_unfencing_summary(rsc->id, agent, data->digest_secure_calc); printf("Parameters to %s for unfencing %s changed, try '%s'\n", rsc->id, node->details->uname, digest); free(digest); } return data; } const char *rsc_printable_id(pe_resource_t *rsc) { if (is_not_set(rsc->flags, pe_rsc_unique)) { return ID(rsc->xml); } return rsc->id; } void clear_bit_recursive(pe_resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; clear_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; clear_bit_recursive(child_rsc, flag); } } void set_bit_recursive(pe_resource_t * 
rsc, unsigned long long flag) { GListPtr gIter = rsc->children; set_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; set_bit_recursive(child_rsc, flag); } } static GListPtr find_unfencing_devices(GListPtr candidates, GListPtr matches) { for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) { pe_resource_t *candidate = gIter->data; const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES); const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES); if(candidate->children) { matches = find_unfencing_devices(candidate->children, matches); } else if (is_not_set(candidate->flags, pe_rsc_fence_device)) { continue; } else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) { matches = g_list_prepend(matches, candidate); } } return matches; } static int node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set) { int member_count = 0; int online_count = 0; int top_priority = 0; int lowest_priority = 0; GListPtr gIter = NULL; // `priority-fencing-delay` is disabled if (data_set->priority_fencing_delay < 0) { return -1; } /* No need to delay fencing if the fencing target is not a normal cluster * member, for example if it's a remote node or a guest node. */ if (node->details->type != node_member) { return 0; } // No need to delay fencing if the fencing target is in our partition if (node->details->online) { return 0; } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *n = gIter->data; if (n->details->type != node_member) { continue; } member_count ++; if (n->details->online) { online_count++; } if (member_count == 1 || n->details->priority > top_priority) { top_priority = n->details->priority; } if (member_count == 1 || n->details->priority < lowest_priority) { lowest_priority = n->details->priority; } } // No need to delay if we have more than half of the cluster members if (online_count > member_count / 2) { return 0; } /* All the nodes have equal priority. * Any configured corresponding `pcmk_delay_base/max` will be applied. */ if (lowest_priority == top_priority) { return -1; } if (node->details->priority < top_priority) { return 0; } return data_set->priority_fencing_delay; } pe_action_t * pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set) { char *op_key = NULL; pe_action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); if(data_set->singletons) { stonith_op = g_hash_table_lookup(data_set->singletons, op_key); } if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); if (pe__is_guest_or_remote_node(node) && is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Extra work to detect device changes on remotes * * We may do this for all nodes in the future, but for now * the check_action_definition() based stuff works fine. 
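         * The loop below builds comma-separated "rsc-id:agent:digest" entries
         * for every unfencing device and attaches them to the fencing op's
         * meta attributes (XML_OP_ATTR_DIGESTS_ALL and
         * XML_OP_ATTR_DIGESTS_SECURE).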
*/ long max = 1024; long digests_all_offset = 0; long digests_secure_offset = 0; char *digests_all = calloc(max, sizeof(char)); char *digests_secure = calloc(max, sizeof(char)); GListPtr matches = find_unfencing_devices(data_set->resources, NULL); for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) { pe_resource_t *match = gIter->data; const char *agent = g_hash_table_lookup(match->meta, XML_ATTR_TYPE); op_digest_cache_t *data = NULL; data = fencing_action_digest_cmp(match, agent, node, data_set); if(data->rc == RSC_DIGEST_ALL) { optional = FALSE; crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); if (is_set(data_set->flags, pe_flag_stdout)) { fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id); } } digests_all_offset += snprintf( digests_all+digests_all_offset, max-digests_all_offset, "%s:%s:%s,", match->id, agent, data->digest_all_calc); digests_secure_offset += snprintf( digests_secure+digests_secure_offset, max-digests_secure_offset, "%s:%s:%s,", match->id, agent, data->digest_secure_calc); } g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_ALL), digests_all); g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_SECURE), digests_secure); } } else { free(op_key); } if (data_set->priority_fencing_delay >= 0 /* It's a suitable case where `priority-fencing-delay` applies. * At least add `priority-fencing-delay` field as an indicator. */ && (priority_delay /* Re-calculate priority delay for the suitable case when * pe_fence_op() is called again by stage6() after node priority has * been actually calculated with native_add_running() */ || g_hash_table_lookup(stonith_op->meta, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) { /* Add `priority-fencing-delay` to the fencing op even if it's 0 for * the targeting node. So that it takes precedence over any possible * `pcmk_delay_base/max`. 
*/ char *delay_s = crm_itoa(node_priority_fencing_delay(node, data_set)); g_hash_table_insert(stonith_op->meta, strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY), delay_s); } if(optional == FALSE && pe_can_fence(data_set, node)) { pe_action_required(stonith_op, NULL, reason); } else if(reason && stonith_op->reason == NULL) { stonith_op->reason = strdup(reason); } return stonith_op; } void trigger_unfencing( pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set) { if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) { /* No resources require it */ return; } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set); if(dependency) { order_actions(unfence, dependency, pe_order_optional); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, data_set); } } } } gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) { pe_tag_t *tag = NULL; GListPtr gIter = NULL; gboolean is_existing = FALSE; CRM_CHECK(tags && tag_name && obj_ref, return FALSE); tag = g_hash_table_lookup(tags, tag_name); if (tag == NULL) { tag = calloc(1, sizeof(pe_tag_t)); if (tag == NULL) { return FALSE; } tag->id = strdup(tag_name); tag->refs = NULL; g_hash_table_insert(tags, strdup(tag_name), tag); } for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *existing_ref = (const char *) gIter->data; if (crm_str_eq(existing_ref, obj_ref, TRUE)){ is_existing = TRUE; break; } } if (is_existing == FALSE) { tag->refs = g_list_append(tag->refs, strdup(obj_ref)); crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); } return TRUE; } void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite) { bool unset = FALSE; bool update = FALSE; const char *change = NULL; if(is_set(flags, pe_action_runnable)) { unset = TRUE; change = "unrunnable"; } else if(is_set(flags, pe_action_optional)) { unset = TRUE; change = "required"; } else if(is_set(flags, pe_action_migrate_runnable)) { unset = TRUE; overwrite = TRUE; change = "unrunnable"; } else if(is_set(flags, pe_action_dangle)) { change = "dangling"; } else if(is_set(flags, pe_action_requires_any)) { change = "required"; } else { crm_err("Unknown flag change to %x by %s: 0x%s", flags, action->uuid, (reason? 
reason->uuid : "0")); } if(unset) { if(is_set(action->flags, flags)) { action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags); update = TRUE; } } else { if(is_not_set(action->flags, flags)) { action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags); update = TRUE; } } if((change && update) || text) { char *reason_text = NULL; if(reason == NULL) { pe_action_set_reason(action, text, overwrite); } else if(reason->rsc == NULL) { reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:""); } else { reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA"); } if(reason_text && action->rsc != reason->rsc) { pe_action_set_reason(action, reason_text, overwrite); } free(reason_text); } } void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) { if (action->reason != NULL && overwrite) { pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'", action->uuid, action->reason, crm_str(reason)); free(action->reason); } else if (action->reason == NULL) { pe_rsc_trace(action->rsc, "Set %s reason to '%s'", action->uuid, crm_str(reason)); } else { // crm_assert(action->reason != NULL && !overwrite); return; } if (reason != NULL) { action->reason = strdup(reason); } else { action->reason = NULL; } } /*! * \internal * \brief Check whether shutdown has been requested for a node * * \param[in] node Node to check * * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise * \note This differs from simply using node->details->shutdown in that it can * be used before that has been determined (and in fact to determine it), * and it can also be used to distinguish requested shutdown from implicit * shutdown of remote nodes by virtue of their connection stopping. */ bool pe__shutdown_requested(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); return shutdown && strcmp(shutdown, "0"); } /*! * \internal * \brief Update a data set's "recheck by" time * * \param[in] recheck Epoch time when recheck should happen * \param[in,out] data_set Current working set */ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) { if ((recheck > get_effective_time(data_set)) && ((data_set->recheck_by == 0) || (data_set->recheck_by > recheck))) { data_set->recheck_by = recheck; } } /*! * \internal * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set */ void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, GHashTable *node_hash, GHashTable *hash, const char *always_first, gboolean overwrite, pe_working_set_t *data_set) { crm_time_t *next_change = crm_time_new_undefined(); pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash, always_first, overwrite, data_set->now, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(recheck, data_set); } crm_time_free(next_change); } bool pe__resource_is_disabled(pe_resource_t *rsc) { const char *target_role = NULL; CRM_CHECK(rsc != NULL, return false); target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); if ((target_role_e == RSC_ROLE_STOPPED) || ((target_role_e == RSC_ROLE_SLAVE) && is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) { return true; } } return false; } /*! 
* \internal * \brief Create an action to clear a resource's history from CIB * * \param[in] rsc Resource to clear * \param[in] node Node to clear history on * * \return New action to clear resource history */ pe_action_t * pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { char *key = NULL; CRM_ASSERT(rsc && node); key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0); return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE, data_set); } diff --git a/maint/testcc.cc b/maint/testcc.cc index 89d216ee48..50843c2468 100644 --- a/maint/testcc.cc +++ b/maint/testcc.cc @@ -1,26 +1,24 @@ /* * Copyright 2019 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include -#include #include #include #include -#include #include #include #include #include #include #include int main() { return 0; } diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in index 5f64f82c46..c0c8a5baaf 100644 --- a/rpm/pacemaker.spec.in +++ b/rpm/pacemaker.spec.in @@ -1,893 +1,893 @@ # Globals and defines to control package behavior (configure these as desired) ## User and group to use for nonprivileged services %global uname hacluster %global gname haclient ## Where to install Pacemaker documentation %if 0%{?suse_version} > 0 %global pcmk_docdir %{_docdir}/%{name}-%{version} %else %if 0%{?rhel} > 7 %global pcmk_docdir %{_docdir}/%{name}-doc %else %global pcmk_docdir %{_docdir}/%{name} %endif %endif ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) -%global pcmkversion 2.0.3 +%global pcmkversion X.Y.Z %global specversion 1 ## Upstream commit (or git tag, such as "Pacemaker-" plus the ## {pcmkversion} macro for an official release) to use for this package %global commit HEAD ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 ## Python major version to use (2, 3, or 0 for auto-detect) %global python_major 0 # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Short version of git commit %define shortcommit %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo ${c:10};; *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) ## Whether this is a tagged release %define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) ## Whether this is a release candidate (in case of a tagged release) %define pre_release %([ "%{tag_release}" -eq 0 ] || { case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; esac; }; echo $?) ## Heuristic used to infer bleeding-edge deployments that are ## less likely to have working versions of the documentation tools %define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) 
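## As a rough illustration of how the version-detection macros above expand
## (example values only; assumes the default commit_abbrev of 7):
##
##   commit=Pacemaker-2.0.3-rc2  -> shortcommit=2.0.3-rc2  tag_release=1  pre_release=1
##   commit=Pacemaker-2.0.3      -> shortcommit=2.0.3      tag_release=1  pre_release=0
##   commit=e91769e5a39f...      -> shortcommit=e91769e    tag_release=0  pre_release=0
##
## i.e. tag_release flags an exact "Pacemaker-*" tag, and pre_release is set
## only when that tag looks like a release candidate (*-rcN).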
## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and ## for that there are only few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## - systemd-aware rpm as manifested with the presence of particular ## macro (rpm itself will trivially always be present when building) ## - existence of /usr/lib/os-release file, which is something heavily ## propagated by systemd project ## - when not good enough, there's always a possibility to check ## particular distro-specific macros (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?)) %if 0%{?fedora} > 20 || 0%{?rhel} > 7 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif %if !%{defined _rundir} %if 0%{?fedora} >= 15 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1200 %define _rundir /run %else %define _rundir /var/run %endif %endif %if 0%{?fedora} > 22 || 0%{?rhel} > 7 %global supports_recommends 1 %endif ## Different distros name certain packages differently ## (note: corosync libraries also differ, but all provide corosync-devel) %if 0%{?suse_version} > 0 %global pkgname_bzip2_devel libbz2-devel %global pkgname_docbook_xsl docbook-xsl-stylesheets %global pkgname_gnutls_devel libgnutls-devel %global pkgname_shadow_utils shadow %global pkgname_procps procps %global pkgname_glue_libs libglue %global pkgname_pcmk_libs lib%{name}3 %global hacluster_id 90 %else %global pkgname_libtool_devel libtool-ltdl-devel %global pkgname_libtool_devel_arch libtool-ltdl-devel%{?_isa} %global pkgname_bzip2_devel bzip2-devel %global pkgname_docbook_xsl docbook-style-xsl %global pkgname_gnutls_devel gnutls-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng %global pkgname_publican publican %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs %global hacluster_id 189 %endif # Python-related definitions ## Use Python 3 on certain platforms if major version not specified %if %{?python_major} == 0 %if 0%{?fedora} > 26 || 0%{?rhel} > 7 %global python_major 3 %endif %endif ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) ## -- distro-dependent tricks or automake's fallback to be applied there %if %{defined _python_bytecompile_extra} %global _python_bytecompile_extra 0 %else ### the statement effectively means no RPM-native byte-compiling will occur at ### all, so distro-dependent tricks for Python-specific packages to be applied %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif ## Values that differ by Python major version %if 0%{?python_major} > 2 %global python_name python3 %global python_path %{?__python3}%{!?__python3:/usr/bin/python%{?python3_pkgversion}%{!?python3_pkgversion:3}} 
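## The python_site fallback below is only used when the distro does not
## already define %{python3_sitelib}: it asks the selected interpreter
## directly, e.g.
##   /usr/bin/python3 -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))'
## which typically prints a platform site-packages path such as
## /usr/lib64/python3.X/site-packages (exact path varies by distro).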
%define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %if 0%{?python_major} > 1 %global python_name python2 %global python_path %{?__python2}%{!?__python2:/usr/bin/python%{?python2_pkgversion}%{!?python2_pkgversion:2}} %define python_site %{?python2_sitelib}%{!?python2_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} %define python_site %{?python_sitelib}%{!?python_sitelib:%( python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif %endif # Definitions for backward compatibility with older RPM versions ## Ensure the license macro behaves consistently (older RPM will otherwise ## overwrite it once it encounters "License:"). Courtesy Jason Tibbitts: ## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 %if !%{defined _licensedir} %define description %{lua: rpm.define("license %doc") print("%description") } %endif # Define conditionals so that "rpmbuild --with " and # "rpmbuild --without " can enable and disable specific features ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Add option to create binaries with coverage analysis %bcond_with coverage ## Add option to skip generating documentation ## (the build tools aren't available everywhere) %bcond_without doc ## Add option to prefix package version with "0." ## (so later "official" packages will be considered updates) %bcond_with pre_release ## Add option to ship Upstart job files %bcond_with upstart_job ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening ## Add option to disable links for legacy daemon names %bcond_without legacy_links # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif # Define the release version # (do not look at externally enforced pre-release flag for tagged releases # as only -rc tags, captured with the second condition, implies that then) %if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} %if 0%{pre_release} %define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) %else %define pcmk_release 0.%{specversion}.%{shortcommit}.git %endif %else %if 0%{tag_release} %define pcmk_release %{specversion} %else %define pcmk_release %{specversion}.%{shortcommit}.git %endif %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: http://www.clusterlabs.org Group: System Environment/Daemons # Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: # https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = 
%{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if !%{defined _unitdir} Requires: %{pkgname_procps} Requires: psmisc %endif %{?systemd_requires} Requires: %{python_path} BuildRequires: %{python_name}-devel # Pacemaker requires a minimum libqb functionality Requires: libqb >= 0.13.0 BuildRequires: libqb-devel >= 0.13.0 # Basics required for the build (even if usually satisfied through other BRs) BuildRequires: coreutils findutils grep sed # Required for core functionality BuildRequires: automake autoconf gcc libtool pkgconfig %{?pkgname_libtool_devel} BuildRequires: pkgconfig(glib-2.0) >= 2.16 BuildRequires: libxml2-devel libxslt-devel libuuid-devel BuildRequires: %{pkgname_bzip2_devel} # Enables optional functionality BuildRequires: ncurses-devel %{pkgname_docbook_xsl} BuildRequires: help2man %{pkgname_gnutls_devel} pam-devel pkgconfig(dbus-1) %if %{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync >= 2.0.0 BuildRequires: corosync-devel >= 2.0.0 %if %{with stonithd} BuildRequires: %{pkgname_glue_libs}-devel %endif ## (note no avoiding effect when building through non-customized mock) %if !%{bleeding} %if %{with doc} BuildRequires: inkscape asciidoc %{?pkgname_publican} %endif %endif Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} # Bundled bits ## Pacemaker uses the crypto/md5-buffer module from gnulib %if 0%{?fedora} || 0%{?rhel} Provides: bundled(gnulib) %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --with(out) : coverage doc stonithd hardening pre_release profiling upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %if 0%{?supports_recommends} Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report Recommends: tar Recommends: bzip2 %endif Requires: perl-TimeDate Requires: %{pkgname_procps} Requires: psmisc Requires(post):coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{pkgname_pcmk_libs} License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons Requires(pre): %{pkgname_shadow_utils} Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t Conflicts: sbd < 1.4.0 %description -n %{pkgname_pcmk_libs} Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if !%{defined _unitdir} Requires: %{pkgname_procps} %endif # -remote can be fully independent of systemd %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package -n %{pkgname_pcmk_libs}-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: libuuid-devel%{?_isa} %{?pkgname_libtool_devel_arch} Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} Requires: %{pkgname_bzip2_devel}%{?_isa} glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} Requires: corosync-devel >= 2.0.0 %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: %{pkgname_procps} Requires: psmisc BuildArch: noarch # systemd python bindings are separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd %else %if 0%{?fedora} > 20 || 0%{?rhel} > 6 Requires: systemd-python %endif %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker Group: Documentation BuildArch: noarch %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas License: GPLv2+ Summary: Schemas and upgrade stylesheets for Pacemaker BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker Pacemaker is an advanced, scalable High-Availability cluster resource manager. %prep %setup -q -n %{name}-%{commit} %build # Early versions of autotools (e.g. 
RHEL <= 5) do not support --docdir export docdir=%{pcmk_docdir} export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will # use its own defaults otherwise; if such hardenings are completely # undesired, rpmbuild using "--without hardening" # (or "--define '_without_hardening 1'") export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %endif ./autogen.sh %{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ %{!?with_legacy_links: --disable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{!?with_doc: --with-brand=} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ --with-initdir=%{_initrddir} \ --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif make %{_smp_mflags} V=1 %check make %{_smp_mflags} check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED } 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint [ -f .CHECKED ] && rm -f -- .CHECKED exit $? # TODO remove when rpm<4.14 compatibility irrelevant %install # skip automake-native Python byte-compilation, since RPM-native one (possibly # distro-confined to Python-specific directories, which is currently the only # relevant place, anyway) assures proper intrinsic alignment with wider system # (such as with py_byte_compile macro, which is concurrent Fedora/EL specific) make install \ DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf %endif %if %{defined _unitdir} mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif # Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # For now, don't package the servicelog-related binaries built only for # ppc64le when certain dependencies are installed. If they get more exercise by # advanced users, we can reconsider. 
rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif # Byte-compile Python sources where suitable and the distro procedures known %if %{defined py_byte_compile} %{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests} %if !%{defined _python_bytecompile_extra} %{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts} %endif %endif %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . -name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %post %if %{defined _unitdir} %systemd_post pacemaker.service %else /sbin/chkconfig --add pacemaker || : %endif %preun %if %{defined _unitdir} %systemd_preun pacemaker.service %else /sbin/service pacemaker stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker || : fi %endif %postun %if %{defined _unitdir} %systemd_postun_with_restart pacemaker.service %endif %pre remote %if %{defined _unitdir} # Stop the service before anything is touched, and remember to restart # it as one of the last actions (compared to using systemd_postun_with_restart, # this avoids suicide when sbd is in use) systemctl --quiet is-active pacemaker_remote if [ $? -eq 0 ] ; then mkdir -p %{_localstatedir}/lib/rpm-state/%{name} touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote systemctl stop pacemaker_remote >/dev/null 2>&1 else rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post remote %if %{defined _unitdir} %systemd_post pacemaker_remote.service %else /sbin/chkconfig --add pacemaker_remote || : %endif %preun remote %if %{defined _unitdir} %systemd_preun pacemaker_remote.service %else /sbin/service pacemaker_remote stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker_remote || : fi %endif %postun remote %if %{defined _unitdir} # This next line is a no-op, because we stopped the service earlier, but # we leave it here because it allows us to revert to the standard behavior # in the future if desired %systemd_postun_with_restart pacemaker_remote.service # Explicitly take care of removing the flag-file(s) upon final removal if [ "$1" -eq 0 ] ; then rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %posttrans remote %if %{defined _unitdir} if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then systemctl start pacemaker_remote >/dev/null 2>&1 rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post cli %if %{defined _unitdir} %systemd_post crm_mon.service %endif if [ "$1" -eq 2 ]; then # Package upgrade, not initial install: # Move any pre-2.0 logs to new location to ensure they get rotated { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \ || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker } >/dev/null 2>/dev/null || : fi %preun cli %if %{defined _unitdir} %systemd_preun crm_mon.service %endif %postun cli %if %{defined _unitdir} %systemd_postun_with_restart crm_mon.service %endif %pre -n %{pkgname_pcmk_libs} getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id} getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 
%{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname} exit 0 %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets libs %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %postun -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %post cluster-libs -p /sbin/ldconfig %postun cluster-libs -p /sbin/ldconfig %endif %files ########################################################### %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %if %{defined _unitdir} %{_unitdir}/pacemaker.service %else %{_initrddir}/pacemaker %endif %exclude %{_libexecdir}/pacemaker/cts-log-watcher %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted %if %{with legacy_links} %exclude %{_sbindir}/pacemaker_remoted %endif %{_libexecdir}/pacemaker/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/fence_legacy %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_o2cb.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_legacy.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_datadir}/pacemaker/alerts %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine /usr/lib/ocf/resource.d/pacemaker/controld /usr/lib/ocf/resource.d/pacemaker/o2cb /usr/lib/ocf/resource.d/pacemaker/remote %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/pacemaker.conf %config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf %endif %files cli %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/crm_mon.conf %endif %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource %{_sbindir}/crm_rule %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %{_sbindir}/stonith_admin # "dirname" is owned by -schemas, which is a prerequisite %{_datadir}/pacemaker/report.collector %{_datadir}/pacemaker/report.common # XXX "dirname" is not owned by any prerequisite %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude /usr/lib/ocf/resource.d/pacemaker/controld %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb %exclude /usr/lib/ocf/resource.d/pacemaker/remote %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/pacemaker-controld.* %exclude %{_mandir}/man7/pacemaker-schedulerd.* %exclude %{_mandir}/man7/pacemaker-fenced.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_legacy.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, 
%{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles %files -n %{pkgname_pcmk_libs} %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpacemaker.so.* %{_libdir}/libstonithd.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files cluster-libs %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files remote %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} # state directory is shared between the subpackages # let rpm take care of removing it once it isn't # referenced anymore and empty %ghost %dir %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker-remoted %if %{with legacy_links} %{_sbindir}/pacemaker_remoted %endif %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files doc %doc %{pcmk_docdir} %license licenses/CC-BY-SA-4.0 %files cts %{python_site}/cts %{_datadir}/pacemaker/tests %{_libexecdir}/pacemaker/cts-log-watcher %{_libexecdir}/pacemaker/cts-support %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files -n %{pkgname_pcmk_libs}-devel %{_includedir}/pacemaker %{_libdir}/*.so %if %{with coverage} %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files schemas %license licenses/GPLv2 %dir %{_datadir}/pacemaker %{_datadir}/pacemaker/*.rng %{_datadir}/pacemaker/*.xsl %{_datadir}/pacemaker/api %{_datadir}/pkgconfig/pacemaker-schemas.pc %changelog * PACKAGE_DATE ClusterLabs PACKAGE_VERSION-1 - See included ChangeLog file for details diff --git a/tools/cibadmin.c b/tools/cibadmin.c index 6deec24f90..f7413fcb57 100644 --- a/tools/cibadmin.c +++ b/tools/cibadmin.c @@ -1,772 +1,772 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include static int message_timeout_ms = 30; static int command_options = 0; static int request_id = 0; static int bump_log_num = 0; static const char *host = NULL; static const char *cib_user = NULL; static const char *cib_action = NULL; static const char *obj_type = NULL; static cib_t *the_cib = NULL; static GMainLoop *mainloop = NULL; static gboolean force_flag = FALSE; static crm_exit_t exit_code = CRM_EX_OK; int do_init(void); int do_work(xmlNode *input, int command_options, xmlNode **output); void cibadmin_op_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\tThis text", pcmk__option_default }, { "version", no_argument, NULL, '$', "\tVersion information", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\tIncrease debug output\n", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "Commands:", pcmk__option_default }, { "upgrade", no_argument, NULL, 'u', "\tUpgrade the configuration to the latest syntax", pcmk__option_default }, { "query", no_argument, NULL, 'Q', "\tQuery the contents of the CIB", pcmk__option_default }, { "erase", no_argument, NULL, 'E', "\tErase the contents of the whole CIB", pcmk__option_default }, { "bump", no_argument, NULL, 'B', "\tIncrease the CIB's epoch value by 1", pcmk__option_default }, { "create", no_argument, NULL, 'C', "\tCreate an object in the CIB (will fail if object already exists)", pcmk__option_default }, { "modify", no_argument, NULL, 'M', "\tFind object somewhere in CIB's XML tree and update it " "(fails if object does not exist unless -c is also specified)", pcmk__option_default }, { "patch", no_argument, NULL, 'P', "\tSupply an update in the form of an XML diff (see crm_diff(8))", pcmk__option_default }, { "replace", no_argument, NULL, 'R', "\tRecursively replace an object in the CIB", pcmk__option_default }, { "delete", no_argument, NULL, 'D', "\tDelete first object matching supplied criteria " "(for example, )", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\n\tThe XML element name and all attributes must match " "in order for the element to be deleted.\n", pcmk__option_default }, { "delete-all", no_argument, NULL, 'd', "When used with --xpath, remove all matching objects in the " "configuration instead of just the first one", pcmk__option_default }, { "empty", no_argument, NULL, 'a', "\tOutput an empty CIB", pcmk__option_default }, { "md5-sum", no_argument, NULL, '5', "\tCalculate the on-disk CIB digest", pcmk__option_default }, { "md5-sum-versioned", no_argument, NULL, '6', "Calculate an on-the-wire versioned CIB digest", pcmk__option_default }, { "blank", no_argument, NULL, '-', NULL, pcmk__option_hidden }, { "-spacer-", required_argument, NULL, '-', "\nAdditional options:", pcmk__option_default }, { "force", no_argument, NULL, 'f', NULL, pcmk__option_default }, { "timeout", required_argument, NULL, 't', "Time (in seconds) to wait before declaring the operation failed", pcmk__option_default }, { "user", required_argument, NULL, 'U', "Run the command with permissions of the named user (valid only for " "the root and " CRM_DAEMON_USER " accounts)", pcmk__option_default }, { "sync-call", no_argument, NULL, 's', "Wait for call to complete before returning", pcmk__option_default }, { "local", no_argument, NULL, 'l', "\tCommand takes effect locally (should be used 
only for queries)", pcmk__option_default }, { "allow-create", no_argument, NULL, 'c', "(Advanced) Allow target of --modify/-M to be created " "if it does not exist", pcmk__option_default }, { "no-children", no_argument, NULL, 'n', "(Advanced) When querying an object, do not include its children " "in the result", pcmk__option_default }, { "no-bcast", no_argument, NULL, 'b', NULL, pcmk__option_hidden }, { "-spacer-", no_argument, NULL, '-', "\nData:", pcmk__option_default }, { "xml-text", required_argument, NULL, 'X', "Retrieve XML from the supplied string", pcmk__option_default }, { "xml-file", required_argument, NULL, 'x', "Retrieve XML from the named file", pcmk__option_default }, { "xml-pipe", no_argument, NULL, 'p', "Retrieve XML from stdin\n", pcmk__option_default }, { "scope", required_argument, NULL, 'o', "Limit scope of operation to specific section of CIB", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\tValid values: nodes, resources, constraints, crm_config, " "rsc_defaults, op_defaults, status", pcmk__option_default }, { "xpath", required_argument, NULL, 'A', "A valid XPath to use instead of --scope/-o", pcmk__option_default }, { "node-path", no_argument, NULL, 'e', "When performing XPath queries, return path of any matches found", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "(for example, \"/cib/configuration/resources/clone[@id='ms_RH1_SCS']" "/primitive[@id='prm_RH1_SCS']\")", pcmk__option_paragraph }, { "node", required_argument, NULL, 'N', "(Advanced) Send command to the specified host", pcmk__option_default }, { "-spacer-", no_argument, NULL, '!', NULL, pcmk__option_hidden }, { "-spacer-", no_argument, NULL, '-', "\n\nExamples:\n", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "Query the configuration from the local node:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --query --local", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Query just the cluster options configuration:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --query --scope crm_config", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Query all 'target-role' settings:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --query --xpath \"//nvpair[@name='target-role']\"", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Remove all 'is-managed' settings:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --delete-all --xpath \"//nvpair[@name='is-managed']\"", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Remove the resource named 'old':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --delete --xml-text ''", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Remove all resources from the configuration:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --replace --scope resources --xml-text ''", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Replace complete configuration with contents of $HOME/pacemaker.xml:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --replace --xml-file $HOME/pacemaker.xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Replace constraints section of configuration with contents of " "$HOME/constraints.xml:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --replace --scope constraints --xml-file " 
"$HOME/constraints.xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Increase configuration version to prevent old configurations from " "being loaded accidentally:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --modify --xml-text ''", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Edit the configuration with your favorite $EDITOR:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " cibadmin --query > $HOME/local.xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', " $EDITOR $HOME/local.xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', " cibadmin --replace --xml-file $HOME/local.xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "SEE ALSO:", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', " crm(8), pcs(8), crm_shadow(8), crm_diff(8)", pcmk__option_default }, { "host", required_argument, NULL, 'h', "deprecated", pcmk__option_hidden }, { 0, 0, 0, 0 } }; static void print_xml_output(xmlNode * xml) { char *buffer; if (!xml) { return; } else if (xml->type != XML_ELEMENT_NODE) { return; } if (command_options & cib_xpath_address) { const char *id = crm_element_value(xml, XML_ATTR_ID); if (safe_str_eq((const char *)xml->name, "xpath-query")) { xmlNode *child = NULL; for (child = xml->children; child; child = child->next) { print_xml_output(child); } } else if (id) { printf("%s\n", id); } } else { buffer = dump_xml_formatted(xml); fprintf(stdout, "%s", crm_str(buffer)); free(buffer); } } // Upgrade requested but already at latest schema static void report_schema_unchanged() { const char *err = pcmk_strerror(pcmk_err_schema_unchanged); crm_info("Upgrade unnecessary: %s\n", err); printf("Upgrade unnecessary: %s\n", err); exit_code = CRM_EX_OK; } int main(int argc, char **argv) { int argerr = 0; int rc = pcmk_ok; int flag; const char *source = NULL; const char *admin_input_xml = NULL; const char *admin_input_file = NULL; gboolean dangerous_cmd = FALSE; gboolean admin_input_stdin = FALSE; xmlNode *output = NULL; xmlNode *input = NULL; int option_index = 0; crm_log_cli_init("cibadmin"); set_crm_log_level(LOG_CRIT); pcmk__set_cli_options(NULL, " [options]", long_options, "query and edit the Pacemaker configuration"); if (argc < 2) { pcmk__cli_help('?', CRM_EX_USAGE); } while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); if (flag == -1) break; switch (flag) { case 't': message_timeout_ms = atoi(optarg); if (message_timeout_ms < 1) { message_timeout_ms = 30; } break; case 'A': obj_type = optarg; command_options |= cib_xpath; break; case 'e': command_options |= cib_xpath_address; break; case 'u': cib_action = CIB_OP_UPGRADE; dangerous_cmd = TRUE; break; case 'E': cib_action = CIB_OP_ERASE; dangerous_cmd = TRUE; break; case 'Q': cib_action = CIB_OP_QUERY; break; case 'P': cib_action = CIB_OP_APPLY_DIFF; break; case 'U': cib_user = optarg; break; case 'M': cib_action = CIB_OP_MODIFY; break; case 'R': cib_action = CIB_OP_REPLACE; break; case 'C': cib_action = CIB_OP_CREATE; break; case 'D': cib_action = CIB_OP_DELETE; break; case '5': cib_action = "md5-sum"; break; case '6': cib_action = "md5-sum-versioned"; break; case 'c': command_options |= cib_can_create; break; case 'n': command_options |= cib_no_children; break; case 'B': cib_action = CIB_OP_BUMP; crm_log_args(argc, argv); break; case 'V': command_options = command_options | cib_verbose; bump_log_num++; break; case '?': case '$': case '!': pcmk__cli_help(flag, CRM_EX_OK); break; case 'o': 
crm_trace("Option %c => %s", flag, optarg); obj_type = optarg; break; case 'X': crm_trace("Option %c => %s", flag, optarg); admin_input_xml = optarg; crm_log_args(argc, argv); break; case 'x': crm_trace("Option %c => %s", flag, optarg); admin_input_file = optarg; crm_log_args(argc, argv); break; case 'p': admin_input_stdin = TRUE; crm_log_args(argc, argv); break; case 'N': case 'h': host = strdup(optarg); break; case 'l': command_options |= cib_scope_local; break; case 'd': cib_action = CIB_OP_DELETE; command_options |= cib_multiple; dangerous_cmd = TRUE; break; case 'b': dangerous_cmd = TRUE; command_options |= cib_inhibit_bcast; command_options |= cib_scope_local; break; case 's': command_options |= cib_sync_call; break; case 'f': force_flag = TRUE; command_options |= cib_quorum_override; crm_log_args(argc, argv); break; case 'a': output = createEmptyCib(1); if (optind < argc) { crm_xml_add(output, XML_ATTR_VALIDATION, argv[optind]); } admin_input_xml = dump_xml_formatted(output); fprintf(stdout, "%s\n", crm_str(admin_input_xml)); crm_exit(CRM_EX_OK); break; default: printf("Argument code 0%o (%c)" " is not (?yet?) supported\n", flag, flag); ++argerr; break; } } while (bump_log_num > 0) { crm_bump_log_level(argc, argv); bump_log_num--; } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); pcmk__cli_help('?', CRM_EX_USAGE); } if (optind > argc || cib_action == NULL) { ++argerr; } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } if (dangerous_cmd && force_flag == FALSE) { fprintf(stderr, "The supplied command is considered dangerous." " To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n"); fflush(stderr); crm_exit(CRM_EX_UNSAFE); } if (admin_input_file != NULL) { input = filename2xml(admin_input_file); source = admin_input_file; } else if (admin_input_xml != NULL) { source = "input string"; input = string2xml(admin_input_xml); } else if (admin_input_stdin) { source = "STDIN"; input = stdin2xml(); } if (input != NULL) { crm_log_xml_debug(input, "[admin input]"); } else if (source) { fprintf(stderr, "Couldn't parse input from %s.\n", source); crm_exit(CRM_EX_CONFIG); } if (safe_str_eq(cib_action, "md5-sum")) { char *digest = NULL; if (input == NULL) { fprintf(stderr, "Please supply XML to process with -X, -x or -p\n"); crm_exit(CRM_EX_USAGE); } digest = calculate_on_disk_digest(input); fprintf(stderr, "Digest: "); fprintf(stdout, "%s\n", crm_str(digest)); free(digest); free_xml(input); crm_exit(CRM_EX_OK); } else if (safe_str_eq(cib_action, "md5-sum-versioned")) { char *digest = NULL; const char *version = NULL; if (input == NULL) { fprintf(stderr, "Please supply XML to process with -X, -x or -p\n"); crm_exit(CRM_EX_USAGE); } version = crm_element_value(input, XML_ATTR_CRM_VERSION); digest = calculate_xml_versioned_digest(input, FALSE, TRUE, version); fprintf(stderr, "Versioned (%s) digest: ", version); fprintf(stdout, "%s\n", crm_str(digest)); free(digest); free_xml(input); crm_exit(CRM_EX_OK); } rc = do_init(); if (rc != pcmk_ok) { crm_err("Init failed, could not perform requested operations"); fprintf(stderr, "Init failed, could not perform requested operations\n"); free_xml(input); crm_exit(crm_errno2exit(rc)); } rc = do_work(input, command_options, &output); if (rc > 0) { /* wait for the reply by creating a mainloop and running it until * the callbacks are invoked... 
*/ request_id = rc; the_cib->cmds->register_callback(the_cib, request_id, message_timeout_ms, FALSE, NULL, "cibadmin_op_callback", cibadmin_op_callback); mainloop = g_main_loop_new(NULL, FALSE); crm_trace("%s waiting for reply from the local CIB", crm_system_name); crm_info("Starting mainloop"); g_main_loop_run(mainloop); } else if ((rc == -pcmk_err_schema_unchanged) && crm_str_eq(cib_action, CIB_OP_UPGRADE, TRUE)) { report_schema_unchanged(); } else if (rc < 0) { crm_err("Call failed: %s", pcmk_strerror(rc)); fprintf(stderr, "Call failed: %s\n", pcmk_strerror(rc)); if (rc == -pcmk_err_schema_validation) { if (crm_str_eq(cib_action, CIB_OP_UPGRADE, TRUE)) { xmlNode *obj = NULL; int version = 0, rc = 0; rc = the_cib->cmds->query(the_cib, NULL, &obj, command_options); if (rc == pcmk_ok) { update_validation(&obj, &version, 0, TRUE, FALSE); } } else if (output) { validate_xml_verbose(output); } } exit_code = crm_errno2exit(rc); } if (output != NULL) { print_xml_output(output); free_xml(output); } crm_trace("%s exiting normally", crm_system_name); free_xml(input); rc = the_cib->cmds->signoff(the_cib); if (exit_code == CRM_EX_OK) { exit_code = crm_errno2exit(rc); } cib_delete(the_cib); crm_exit(exit_code); } int do_work(xmlNode * input, int call_options, xmlNode ** output) { /* construct the request */ the_cib->call_timeout = message_timeout_ms; if (strcasecmp(CIB_OP_REPLACE, cib_action) == 0 && safe_str_eq(crm_element_name(input), XML_TAG_CIB)) { xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, input); if (status == NULL) { create_xml_node(input, XML_CIB_TAG_STATUS); } } if (cib_action != NULL) { crm_trace("Passing \"%s\" to variant_op...", cib_action); return cib_internal_op(the_cib, cib_action, host, obj_type, input, output, call_options, cib_user); } else { crm_err("You must specify an operation"); } return -EINVAL; } int do_init(void) { int rc = pcmk_ok; the_cib = cib_new(); rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); if (rc != pcmk_ok) { - crm_err("Connection to the CIB manager failed: %s", pcmk_strerror(rc)); - fprintf(stderr, "Connection to the CIB manager failed: %s\n", + crm_err("Could not connect to the CIB: %s", pcmk_strerror(rc)); + fprintf(stderr, "Could not connect to the CIB: %s\n", pcmk_strerror(rc)); } return rc; } void cibadmin_op_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { exit_code = crm_errno2exit(rc); if (rc == -pcmk_err_schema_unchanged) { report_schema_unchanged(); } else if (rc != pcmk_ok) { crm_warn("Call %s failed (%d): %s", cib_action, rc, pcmk_strerror(rc)); fprintf(stderr, "Call %s failed (%d): %s\n", cib_action, rc, pcmk_strerror(rc)); print_xml_output(output); } else if (safe_str_eq(cib_action, CIB_OP_QUERY) && output == NULL) { crm_err("Query returned no output"); crm_log_xml_err(msg, "no output"); } else if (output == NULL) { crm_info("Call passed"); } else { crm_info("Call passed"); print_xml_output(output); } if (call_id == request_id) { g_main_loop_quit(mainloop); } else { crm_info("Message was not the response we were looking for (%d vs. %d)", call_id, request_id); } } diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c index 6445d11620..dddb14a601 100644 --- a/tools/crm_attribute.c +++ b/tools/crm_attribute.c @@ -1,494 +1,494 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include gboolean BE_QUIET = FALSE; char command = 'G'; const char *dest_uname = NULL; char *dest_node = NULL; char *set_name = NULL; char *attr_id = NULL; char *attr_name = NULL; char *attr_pattern = NULL; const char *type = NULL; const char *rsc_id = NULL; const char *attr_value = NULL; const char *attr_default = NULL; const char *set_type = NULL; static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\tThis text", pcmk__option_default }, { "version", no_argument, NULL, '$', "\tVersion information", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\tIncrease debug output", pcmk__option_default }, { "quiet", no_argument, NULL, 'q', "\tPrint only the value on stdout\n", pcmk__option_default }, { "name", required_argument, NULL, 'n', "Name of the attribute/option to operate on", pcmk__option_default }, { "pattern", required_argument, NULL, 'P', "Pattern matching names of attributes (only with -v/-D and -l reboot)", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nCommands:", pcmk__option_default }, { "query", no_argument, NULL, 'G', "\tQuery the current value of the attribute/option", pcmk__option_default }, { "update", required_argument, NULL, 'v', "Update the value of the attribute/option", pcmk__option_default }, { "delete", no_argument, NULL, 'D', "\tDelete the attribute/option", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:", pcmk__option_default }, { "node", required_argument, NULL, 'N', "Set a node attribute for named node (instead of a cluster option). 
" "See also: -l", pcmk__option_default }, { "type", required_argument, NULL, 't', "Which part of the configuration to update/delete/query the option in", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\t\tValid values: crm_config, rsc_defaults, op_defaults, tickets", pcmk__option_default }, { "lifetime", required_argument, NULL, 'l', "Lifetime of the node attribute", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\t\tValid values: reboot, forever", pcmk__option_default }, { "utilization", no_argument, NULL, 'z', "Set an utilization attribute for the node.", pcmk__option_default }, { "set-name", required_argument, NULL, 's', "(Advanced) The attribute set in which to place the value", pcmk__option_default }, { "id", required_argument, NULL, 'i', "\t(Advanced) The ID used to identify the attribute", pcmk__option_default }, { "default", required_argument, NULL, 'd', "(Advanced) Default value to display if none is found in configuration", pcmk__option_default }, { "inhibit-policy-engine", no_argument, NULL, '!', NULL, pcmk__option_hidden }, /* legacy */ { "quiet", no_argument, NULL, 'Q', NULL, pcmk__option_hidden }, { "node-uname", required_argument, NULL, 'U', NULL, pcmk__option_hidden }, { "get-value", no_argument, NULL, 'G', NULL, pcmk__option_hidden }, { "delete-attr", no_argument, NULL, 'D', NULL, pcmk__option_hidden }, { "attr-value", required_argument, NULL, 'v', NULL, pcmk__option_hidden }, { "attr-name", required_argument, NULL, 'n', NULL, pcmk__option_hidden }, { "attr-id", required_argument, NULL, 'i', NULL, pcmk__option_hidden }, { "-spacer-", no_argument, NULL, '-', "\nExamples:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', "Add new node attribute called 'location' with the value of 'office' " "for host 'myhost':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_attribute --node myhost --name location --update office", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Query the value of the 'location' node attribute for host 'myhost':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_attribute --node myhost --name location --query", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Change the value of the 'location' node attribute for host 'myhost':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_attribute --node myhost --name location --update backoffice", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Delete the 'location' node attribute for host 'myhost':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_attribute --node myhost --name location --delete", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Query the value of the cluster-delay cluster option:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_attribute --type crm_config --name cluster-delay --query", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Query value of the \"cluster-delay\" cluster option and print only " "the value:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_attribute --type crm_config --name cluster-delay --query --quiet", pcmk__option_example }, { 0, 0, 0, 0 } }; int main(int argc, char **argv) { cib_t *the_cib = NULL; int rc = pcmk_ok; int cib_opts = cib_sync_call; int argerr = 0; int flag; int option_index = 0; int is_remote_node = 0; bool try_attrd = true; int attrd_opts = pcmk__node_attr_none; crm_log_cli_init("crm_attribute"); 
pcmk__set_cli_options(NULL, "-n [options]", long_options, "query and update Pacemaker cluster options " "and node attributes"); if (argc < 2) { pcmk__cli_help('?', CRM_EX_USAGE); } while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; case 'G': command = flag; attr_value = optarg; break; case 'D': case 'v': command = flag; attr_value = optarg; crm_log_args(argc, argv); break; case 'q': case 'Q': BE_QUIET = TRUE; break; case 'U': case 'N': dest_uname = strdup(optarg); break; case 's': set_name = strdup(optarg); break; case 'l': case 't': type = optarg; break; case 'z': type = XML_CIB_TAG_NODES; set_type = XML_TAG_UTILIZATION; break; case 'n': attr_name = strdup(optarg); break; case 'P': attr_pattern = strdup(optarg); break; case 'i': attr_id = strdup(optarg); break; case 'r': rsc_id = optarg; break; case 'd': attr_default = optarg; break; case '!': crm_warn("Inhibiting notifications for this update"); cib_opts |= cib_inhibit_notify; break; default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (optind > argc) { ++argerr; } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } the_cib = cib_new(); rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); if (rc != pcmk_ok) { - fprintf(stderr, "Error connecting to the CIB manager: %s\n", + fprintf(stderr, "Could not connect to the CIB: %s\n", pcmk_strerror(rc)); crm_exit(crm_errno2exit(rc)); } if (type == NULL && dest_uname != NULL) { type = "forever"; } if (safe_str_eq(type, "reboot")) { type = XML_CIB_TAG_STATUS; } else if (safe_str_eq(type, "forever")) { type = XML_CIB_TAG_NODES; } if (type == NULL && dest_uname == NULL) { /* we're updating cluster options - don't populate dest_node */ type = XML_CIB_TAG_CRMCONFIG; } else if (safe_str_eq(type, XML_CIB_TAG_CRMCONFIG)) { } else if (safe_str_neq(type, XML_CIB_TAG_TICKETS)) { /* If we are being called from a resource agent via the cluster, * the correct local node name will be passed as an environment * variable. Otherwise, we have to ask the cluster. 
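*
* For example, a resource agent can simply run
*
*     crm_attribute -l reboot -n my_attr -v 1
*
* without passing --node, and the attribute lands on the node the agent is
* being run for (or, failing that, on the local node). A rough sketch of that
* lookup order, with a purely illustrative environment variable name (not the
* one the helper actually reads):
*
*     const char *target = getenv("PCMK_node_name_hint");   // hypothetical
*     if (target == NULL) {
*         target = get_local_node_name();  // fall back to asking the cluster
*     }
*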
*/ dest_uname = pcmk__node_attr_target(dest_uname); if (dest_uname == NULL) { dest_uname = get_local_node_name(); } rc = query_node_uuid(the_cib, dest_uname, &dest_node, &is_remote_node); if (pcmk_ok != rc) { fprintf(stderr, "Could not map name=%s to a UUID\n", dest_uname); the_cib->cmds->signoff(the_cib); cib_delete(the_cib); crm_exit(crm_errno2exit(rc)); } } if ((command == 'D') && (attr_name == NULL) && (attr_pattern == NULL)) { fprintf(stderr, "Error: must specify attribute name or pattern to delete\n"); crm_exit(CRM_EX_USAGE); } if (attr_pattern) { if (((command != 'v') && (command != 'D')) || safe_str_neq(type, XML_CIB_TAG_STATUS)) { fprintf(stderr, "Error: pattern can only be used with till-reboot update or delete\n"); crm_exit(CRM_EX_USAGE); } command = 'u'; free(attr_name); attr_name = attr_pattern; } // Only go through attribute manager for transient attributes try_attrd = safe_str_eq(type, XML_CIB_TAG_STATUS); // Don't try to contact attribute manager if we're using a file as CIB if (getenv("CIB_file") || getenv("CIB_shadow")) { try_attrd = FALSE; } if (is_remote_node) { attrd_opts = pcmk__node_attr_remote; } if (((command == 'v') || (command == 'D') || (command == 'u')) && try_attrd && (pcmk__node_attr_request(NULL, command, dest_uname, attr_name, attr_value, type, set_name, NULL, NULL, attrd_opts) == pcmk_rc_ok)) { crm_info("Update %s=%s sent via pacemaker-attrd", attr_name, ((command == 'D')? "" : attr_value)); } else if (command == 'D') { rc = delete_attr_delegate(the_cib, cib_opts, type, dest_node, set_type, set_name, attr_id, attr_name, attr_value, TRUE, NULL); if (rc == -ENXIO) { /* Nothing to delete... * which means it's not there... * which is what the admin wanted */ rc = pcmk_ok; } } else if (command == 'v') { CRM_LOG_ASSERT(type != NULL); CRM_LOG_ASSERT(attr_name != NULL); CRM_LOG_ASSERT(attr_value != NULL); rc = update_attr_delegate(the_cib, cib_opts, type, dest_node, set_type, set_name, attr_id, attr_name, attr_value, TRUE, NULL, is_remote_node ? "remote" : NULL); } else { /* query */ char *read_value = NULL; rc = read_attr_delegate(the_cib, type, dest_node, set_type, set_name, attr_id, attr_name, &read_value, TRUE, NULL); if (rc == -ENXIO && attr_default) { read_value = strdup(attr_default); rc = pcmk_ok; } crm_info("Read %s=%s %s%s", attr_name, crm_str(read_value), set_name ? "in " : "", set_name ? set_name : ""); if (rc == -ENOTUNIQ) { // Multiple matches (already displayed) are not error for queries rc = pcmk_ok; } else if (BE_QUIET == FALSE) { fprintf(stdout, "%s%s %s%s %s%s value=%s\n", type ? "scope=" : "", type ? type : "", attr_id ? "id=" : "", attr_id ? attr_id : "", attr_name ? "name=" : "", attr_name ? attr_name : "", read_value ? read_value : "(null)"); } else if (read_value != NULL) { fprintf(stdout, "%s\n", read_value); } free(read_value); } if (rc == -ENOTUNIQ) { printf("Please choose from one of the matches above and supply the 'id' with --attr-id\n"); } else if (rc != pcmk_ok) { fprintf(stderr, "Error performing operation: %s\n", pcmk_strerror(rc)); } the_cib->cmds->signoff(the_cib); cib_delete(the_cib); crm_exit(crm_errno2exit(rc)); } diff --git a/tools/crm_mon.c b/tools/crm_mon.c index ef357e7c69..1903e6cc08 100644 --- a/tools/crm_mon.c +++ b/tools/crm_mon.c @@ -1,2174 +1,2185 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // pcmk__ends_with_ext() #include #include #include #include #include #include #include #include #include #include #include #include #include "crm_mon.h" #define SUMMARY "Provides a summary of cluster's current state.\n\n" \ "Outputs varying levels of detail in a number of different formats." /* * Definitions indicating which items to print */ static unsigned int show; /* * Definitions indicating how to output */ static mon_output_format_t output_format = mon_output_unset; /* other globals */ static GIOChannel *io_channel = NULL; static GMainLoop *mainloop = NULL; static guint timer_id = 0; static mainloop_timer_t *refresh_timer = NULL; static pe_working_set_t *mon_data_set = NULL; static cib_t *cib = NULL; static stonith_t *st = NULL; static xmlNode *current_cib = NULL; static GError *error = NULL; static pcmk__common_args_t *args = NULL; static pcmk__output_t *out = NULL; static GOptionContext *context = NULL; static gchar **processed_args = NULL; static time_t last_refresh = 0; crm_trigger_t *refresh_trigger = NULL; static pcmk__supported_format_t formats[] = { #if CURSES_ENABLED CRM_MON_SUPPORTED_FORMAT_CURSES, #endif PCMK__SUPPORTED_FORMAT_HTML, PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, CRM_MON_SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; /* Define exit codes for monitoring-compatible output * For nagios plugins, the possibilities are * OK=0, WARN=1, CRIT=2, and UNKNOWN=3 */ #define MON_STATUS_WARN CRM_EX_ERROR #define MON_STATUS_CRIT CRM_EX_INVALID_PARAM #define MON_STATUS_UNKNOWN CRM_EX_UNIMPLEMENT_FEATURE #define RECONNECT_MSECS 5000 struct { int reconnect_msec; gboolean daemonize; gboolean show_bans; char *pid_file; char *external_agent; char *external_recipient; char *neg_location_prefix; unsigned int mon_ops; GSList *user_includes_excludes; GSList *includes_excludes; } options = { .reconnect_msec = RECONNECT_MSECS, .mon_ops = mon_op_default }; static void clean_up_connections(void); static crm_exit_t clean_up(crm_exit_t exit_code); static void crm_diff_update(const char *event, xmlNode * msg); static gboolean mon_refresh_display(gpointer user_data); static int cib_connect(gboolean full); static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); static void kick_refresh(gboolean data_updated); static unsigned int all_includes(mon_output_format_t fmt) { if (fmt == mon_output_monitor || fmt == mon_output_plain || fmt == mon_output_console) { return ~mon_show_options; } else { return mon_show_all; } } static unsigned int default_includes(mon_output_format_t fmt) { switch (fmt) { case mon_output_monitor: case mon_output_plain: case mon_output_console: return mon_show_stack | mon_show_dc | mon_show_times | mon_show_counts | mon_show_nodes | mon_show_resources | mon_show_failures; break; case mon_output_xml: case mon_output_legacy_xml: return all_includes(fmt); break; case mon_output_html: case mon_output_cgi: return mon_show_summary | mon_show_nodes | mon_show_resources | mon_show_failures; break; default: return 0; break; } } struct { const char *name; unsigned int bit; } sections[] = { { "attributes", mon_show_attributes }, { "bans", mon_show_bans }, { "counts", mon_show_counts }, { "dc", mon_show_dc }, { "failcounts", mon_show_failcounts }, { "failures", mon_show_failures }, { "fencing", mon_show_fencing_all 
}, { "fencing-failed", mon_show_fence_failed }, { "fencing-pending", mon_show_fence_pending }, { "fencing-succeeded", mon_show_fence_worked }, { "nodes", mon_show_nodes }, { "operations", mon_show_operations }, { "options", mon_show_options }, { "resources", mon_show_resources }, { "stack", mon_show_stack }, { "summary", mon_show_summary }, { "tickets", mon_show_tickets }, { "times", mon_show_times }, { NULL } }; static unsigned int find_section_bit(const char *name) { for (int i = 0; sections[i].name != NULL; i++) { if (crm_str_eq(sections[i].name, name, FALSE)) { return sections[i].bit; } } return 0; } static gboolean apply_exclude(const gchar *excludes, GError **error) { char **parts = NULL; parts = g_strsplit(excludes, ",", 0); for (char **s = parts; *s != NULL; s++) { unsigned int bit = find_section_bit(*s); if (crm_str_eq(*s, "all", TRUE)) { show = 0; } else if (crm_str_eq(*s, "none", TRUE)) { show = all_includes(output_format); } else if (bit != 0) { show &= ~bit; } else { g_set_error(error, G_OPTION_ERROR, CRM_EX_USAGE, "--exclude options: all, attributes, bans, counts, dc, " "failcounts, failures, fencing, fencing-failed, " "fencing-pending, fencing-succeeded, nodes, none, " "operations, options, resources, stack, summary, " "tickets, times"); return FALSE; } } g_strfreev(parts); return TRUE; } static gboolean apply_include(const gchar *includes, GError **error) { char **parts = NULL; parts = g_strsplit(includes, ",", 0); for (char **s = parts; *s != NULL; s++) { unsigned int bit = find_section_bit(*s); if (crm_str_eq(*s, "all", TRUE)) { show = all_includes(output_format); } else if (pcmk__starts_with(*s, "bans")) { show |= mon_show_bans; if (options.neg_location_prefix != NULL) { free(options.neg_location_prefix); options.neg_location_prefix = NULL; } if (strlen(*s) > 4 && (*s)[4] == ':') { options.neg_location_prefix = strdup(*s+5); } } else if (crm_str_eq(*s, "default", TRUE) || crm_str_eq(*s, "defaults", TRUE)) { show |= default_includes(output_format); } else if (crm_str_eq(*s, "none", TRUE)) { show = 0; } else if (bit != 0) { show |= bit; } else { g_set_error(error, G_OPTION_ERROR, CRM_EX_USAGE, "--include options: all, attributes, bans[:PREFIX], counts, dc, " "default, failcounts, failures, fencing, fencing-failed, " "fencing-pending, fencing-succeeded, nodes, none, operations, " "options, resources, stack, summary, tickets, times"); return FALSE; } } g_strfreev(parts); return TRUE; } static gboolean apply_include_exclude(GSList *lst, mon_output_format_t fmt, GError **error) { gboolean rc = TRUE; GSList *node = lst; /* Set the default of what to display here. Note that we OR everything to * show instead of set show directly because it could have already had some * settings applied to it in main. 
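*
* For example, the internal pass (which applies the implicit --include flags
* added for --fence-history) runs before the pass over the user's own
* --include/--exclude options. Because the defaults are OR-ed in here,
*
*     show |= default_includes(fmt);   // keeps bits set by an earlier pass
*
* any bits already set survive, whereas a plain assignment
*
*     show = default_includes(fmt);    // would silently discard them
*
* would reset the display back to the bare defaults.
*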
*/ show |= default_includes(fmt); while (node != NULL) { char *s = node->data; if (pcmk__starts_with(s, "--include=")) { rc = apply_include(s+10, error); } else if (pcmk__starts_with(s, "-I=")) { rc = apply_include(s+3, error); } else if (pcmk__starts_with(s, "--exclude=")) { rc = apply_exclude(s+10, error); } else if (pcmk__starts_with(s, "-U=")) { rc = apply_exclude(s+3, error); } if (rc != TRUE) { break; } node = node->next; } return rc; } static gboolean user_include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { char *s = crm_strdup_printf("%s=%s", option_name, optarg); options.user_includes_excludes = g_slist_append(options.user_includes_excludes, s); return TRUE; } static gboolean include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { char *s = crm_strdup_printf("%s=%s", option_name, optarg); options.includes_excludes = g_slist_append(options.includes_excludes, s); return TRUE; } static gboolean as_cgi_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (args->output_ty != NULL) { free(args->output_ty); } args->output_ty = strdup("html"); output_format = mon_output_cgi; options.mon_ops |= mon_op_one_shot; return TRUE; } static gboolean as_html_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (args->output_ty != NULL) { free(args->output_ty); } if (args->output_dest != NULL) { free(args->output_dest); } if (optarg != NULL) { args->output_dest = strdup(optarg); } args->output_ty = strdup("html"); output_format = mon_output_html; umask(S_IWGRP | S_IWOTH); return TRUE; } static gboolean as_simple_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (args->output_ty != NULL) { free(args->output_ty); } args->output_ty = strdup("text"); output_format = mon_output_monitor; options.mon_ops |= mon_op_one_shot; return TRUE; } static gboolean as_xml_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (args->output_ty != NULL) { free(args->output_ty); } args->output_ty = strdup("xml"); output_format = mon_output_legacy_xml; options.mon_ops |= mon_op_one_shot; return TRUE; } static gboolean fence_history_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { int rc = crm_atoi(optarg, "2"); switch (rc) { case 3: options.mon_ops |= mon_op_fence_full_history | mon_op_fence_history | mon_op_fence_connect; return include_exclude_cb("--include", "fencing", data, err); case 2: options.mon_ops |= mon_op_fence_history | mon_op_fence_connect; return include_exclude_cb("--include", "fencing", data, err); case 1: options.mon_ops |= mon_op_fence_history | mon_op_fence_connect; return include_exclude_cb("--include", "fencing-failed,fencing-pending", data, err); case 0: options.mon_ops &= ~(mon_op_fence_history | mon_op_fence_connect); return include_exclude_cb("--exclude", "fencing", data, err); default: g_set_error(err, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, "Fence history must be 0-3"); return FALSE; } } static gboolean group_by_node_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_group_by_node; return TRUE; } static gboolean hide_headers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { return include_exclude_cb("--exclude", "summary", data, err); } static gboolean inactive_resources_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= 
mon_op_inactive_resources; return TRUE; } static gboolean no_curses_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { output_format = mon_output_plain; return TRUE; } static gboolean one_shot_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_one_shot; return TRUE; } static gboolean print_brief_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_print_brief; return TRUE; } static gboolean print_clone_detail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_print_clone_detail; return TRUE; } static gboolean print_pending_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_print_pending; return TRUE; } static gboolean print_timing_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_print_timing; return include_exclude_cb("--include", "operations", data, err); } static gboolean reconnect_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { int rc = crm_get_msec(optarg); if (rc == -1) { g_set_error(err, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, "Invalid value for -i: %s", optarg); return FALSE; } else { options.reconnect_msec = crm_parse_interval_spec(optarg); } return TRUE; } static gboolean show_attributes_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { return include_exclude_cb("--include", "attributes", data, err); } static gboolean show_bans_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { if (optarg != NULL) { char *s = crm_strdup_printf("bans:%s", optarg); gboolean rc = include_exclude_cb("--include", s, data, err); free(s); return rc; } else { return include_exclude_cb("--include", "bans", data, err); } } static gboolean show_failcounts_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { return include_exclude_cb("--include", "failcounts", data, err); } static gboolean show_operations_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { return include_exclude_cb("--include", "failcounts,operations", data, err); } static gboolean show_tickets_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { return include_exclude_cb("--include", "tickets", data, err); } static gboolean use_cib_file_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { setenv("CIB_file", optarg, 1); options.mon_ops |= mon_op_one_shot; return TRUE; } static gboolean watch_fencing_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { options.mon_ops |= mon_op_watch_fencing; return TRUE; } #define INDENT " " /* *INDENT-OFF* */ static GOptionEntry addl_entries[] = { { "interval", 'i', 0, G_OPTION_ARG_CALLBACK, reconnect_cb, "Update frequency (default is 5 seconds)", "TIMESPEC" }, { "one-shot", '1', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, one_shot_cb, "Display the cluster status once on the console and exit", NULL }, { "daemonize", 'd', 0, G_OPTION_ARG_NONE, &options.daemonize, "Run in the background as a daemon.\n" INDENT "Requires at least one of --output-to and --external-agent.", NULL }, { "pid-file", 'p', 0, G_OPTION_ARG_FILENAME, &options.pid_file, "(Advanced) Daemon pid file location", "FILE" }, { "external-agent", 'E', 0, G_OPTION_ARG_FILENAME, &options.external_agent, "A program to 
run when resource operations take place", "FILE" }, { "external-recipient", 'e', 0, G_OPTION_ARG_STRING, &options.external_recipient, "A recipient for your program (assuming you want the program to send something to someone).", "RCPT" }, { "watch-fencing", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, watch_fencing_cb, "Listen for fencing events. For use with --external-agent.", NULL }, { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, use_cib_file_cb, NULL, NULL }, { NULL } }; static GOptionEntry display_entries[] = { { "include", 'I', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb, "A list of sections to include in the output.\n" INDENT "See `Output Control` help for more information.", "SECTION(s)" }, { "exclude", 'U', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb, "A list of sections to exclude from the output.\n" INDENT "See `Output Control` help for more information.", "SECTION(s)" }, { "group-by-node", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, group_by_node_cb, "Group resources by node", NULL }, { "inactive", 'r', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, inactive_resources_cb, "Display inactive resources", NULL }, { "failcounts", 'f', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_failcounts_cb, "Display resource fail counts", NULL }, { "operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_operations_cb, "Display resource operation history", NULL }, { "timing-details", 't', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_timing_cb, "Display resource operation history with timing details", NULL }, { "tickets", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_tickets_cb, "Display cluster tickets", NULL }, { "fence-history", 'm', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, fence_history_cb, "Show fence history:\n" INDENT "0=off, 1=failures and pending (default without option),\n" INDENT "2=add successes (default without value for option),\n" INDENT "3=show full history without reduction to most recent of each flavor", "LEVEL" }, { "neg-locations", 'L', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, show_bans_cb, "Display negative location constraints [optionally filtered by id prefix]", NULL }, { "show-node-attributes", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_attributes_cb, "Display node attributes", NULL }, { "hide-headers", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, hide_headers_cb, "Hide all headers", NULL }, { "show-detail", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_clone_detail_cb, "Show more details (node IDs, individual clone instances)", NULL }, { "brief", 'b', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_brief_cb, "Brief output", NULL }, { "pending", 'j', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_pending_cb, "Display pending state if 'record-pending' is enabled", NULL }, { "simple-status", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_simple_cb, "Display the cluster status once as a simple one line output (suitable for nagios)", NULL }, { NULL } }; static GOptionEntry deprecated_entries[] = { { "as-html", 'h', G_OPTION_FLAG_FILENAME, G_OPTION_ARG_CALLBACK, as_html_cb, "Write cluster status to the named HTML file.\n" INDENT "Use --output-as=html --output-to=FILE instead.", "FILE" }, { "as-xml", 'X', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_xml_cb, "Write cluster status as XML to stdout. 
This will enable one-shot mode.\n" INDENT "Use --output-as=xml instead.", NULL }, { "disable-ncurses", 'N', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, no_curses_cb, "Disable the use of ncurses.\n" INDENT "Use --output-as=text instead.", NULL }, { "web-cgi", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_cgi_cb, "Web mode with output suitable for CGI (preselected when run as *.cgi).\n" INDENT "Use --output-as=html --html-cgi instead.", NULL }, { NULL } }; /* *INDENT-ON* */ static gboolean mon_timer_popped(gpointer data) { int rc = pcmk_ok; #if CURSES_ENABLED if (output_format == mon_output_console) { clear(); refresh(); } #endif if (timer_id > 0) { g_source_remove(timer_id); timer_id = 0; } print_as(output_format, "Reconnecting...\n"); rc = cib_connect(TRUE); if (rc != pcmk_ok) { timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); } return FALSE; } static void mon_cib_connection_destroy(gpointer user_data) { out->err(out, "Connection to the cluster-daemons terminated"); if (refresh_timer != NULL) { /* we'll trigger a refresh after reconnect */ mainloop_timer_stop(refresh_timer); } if (timer_id) { /* we'll trigger a new reconnect-timeout at the end */ g_source_remove(timer_id); timer_id = 0; } if (st) { /* the client API won't properly reconnect notifications * if they are still in the table - so remove them */ st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); if (st->state != stonith_disconnected) { st->cmds->disconnect(st); } } if (cib) { cib->cmds->signoff(cib); timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL); } return; } /* * Mainloop signal handler. */ static void mon_shutdown(int nsig) { clean_up(CRM_EX_OK); } #if CURSES_ENABLED static sighandler_t ncurses_winch_handler; static void mon_winresize(int nsig) { static int not_done; int lines = 0, cols = 0; if (!not_done++) { if (ncurses_winch_handler) /* the original ncurses WINCH signal handler does the * magic of retrieving the new window size; * otherwise, we'd have to use ioctl or tgetent */ (*ncurses_winch_handler) (SIGWINCH); getmaxyx(stdscr, lines, cols); resizeterm(lines, cols); mainloop_set_trigger(refresh_trigger); } not_done--; } #endif static int cib_connect(gboolean full) { int rc = pcmk_ok; static gboolean need_pass = TRUE; CRM_CHECK(cib != NULL, return -EINVAL); if (getenv("CIB_passwd") != NULL) { need_pass = FALSE; } if (is_set(options.mon_ops, mon_op_fence_connect) && st == NULL) { st = stonith_api_new(); } if (is_set(options.mon_ops, mon_op_fence_connect) && st != NULL && st->state == stonith_disconnected) { rc = st->cmds->connect(st, crm_system_name, NULL); if (rc == pcmk_ok) { crm_trace("Setting up stonith callbacks"); if (is_set(options.mon_ops, mon_op_watch_fencing)) { st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, mon_st_callback_event); st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event); } else { st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, mon_st_callback_display); st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display); } } } if (cib->state != cib_connected_query && cib->state != cib_connected_command) { crm_trace("Connecting to the CIB"); + + /* Hack: the CIB signon will print the prompt for a password if needed, + * but to stderr. If we're in curses, show it on the screen instead. 
+ *
+ * @TODO Add a password prompt (maybe including input) function to
+ * pcmk__output_t and use it in libcib.
+ */
if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) { need_pass = FALSE; print_as(output_format, "Password:"); }
rc = cib->cmds->signon(cib, crm_system_name, cib_query);
- if (rc != pcmk_ok) {
+ if (rc != pcmk_ok) {
+ out->err(out, "Could not connect to the CIB: %s",
+ pcmk_strerror(rc));
+ if (output_format == mon_output_console) {
+ sleep(2);
+ }
return rc; }
rc = cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call); if (rc == pcmk_ok) { mon_refresh_display(&output_format); }
if (rc == pcmk_ok && full) { if (rc == pcmk_ok) { rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy); if (rc == -EPROTONOSUPPORT) { print_as (output_format, "Notification setup not supported, won't be able to reconnect after failure"); if (output_format == mon_output_console) { sleep(2); } rc = pcmk_ok; } } if (rc == pcmk_ok) { cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); } if (rc != pcmk_ok) { out->err(out, "Notification setup failed, could not monitor CIB actions"); if (output_format == mon_output_console) { sleep(2); } clean_up_connections(); } } } return rc; }
#if CURSES_ENABLED
static const char * get_option_desc(char c) { const char *desc = "No help available"; for (GOptionEntry *entry = display_entries; entry != NULL; entry++) { if (entry->short_name == c) { desc = entry->description; break; } } return desc; }
#define print_option_help(output_format, option, condition) \ out->info(out, "%c %c: \t%s", ((condition)? '*': ' '), option, get_option_desc(option));
static gboolean detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data) { int c; gboolean config_mode = FALSE; while (1) { /* Get user input */ c = getchar(); switch (c) { case 'm': if (is_not_set(show, mon_show_fencing_all)) { options.mon_ops |= mon_op_fence_history; options.mon_ops |= mon_op_fence_connect; if (st == NULL) { mon_cib_connection_destroy(NULL); } } if (is_set(show, mon_show_fence_failed) || is_set(show, mon_show_fence_pending) || is_set(show, mon_show_fence_worked)) { show &= ~mon_show_fencing_all; } else { show |= mon_show_fencing_all; } break; case 'c': show ^= mon_show_tickets; break; case 'f': show ^= mon_show_failcounts; break; case 'n': options.mon_ops ^= mon_op_group_by_node; break; case 'o': show ^= mon_show_operations; if (is_not_set(show, mon_show_operations)) { options.mon_ops &= ~mon_op_print_timing; } break; case 'r': options.mon_ops ^= mon_op_inactive_resources; break; case 'R': options.mon_ops ^= mon_op_print_clone_detail; break; case 't': options.mon_ops ^= mon_op_print_timing; if (is_set(options.mon_ops, mon_op_print_timing)) { show |= mon_show_operations; } break; case 'A': show ^= mon_show_attributes; break; case 'L': show ^= mon_show_bans; break; case 'D': /* If any header is shown, clear them all, otherwise set them all */ if (is_set(show, mon_show_stack) || is_set(show, mon_show_dc) || is_set(show, mon_show_times) || is_set(show, mon_show_counts)) { show &= ~mon_show_summary; } else { show |= mon_show_summary; } /* Regardless, we don't show options in console mode.
*/ show &= ~mon_show_options; break; case 'b': options.mon_ops ^= mon_op_print_brief; break; case 'j': options.mon_ops ^= mon_op_print_pending; break; case '?': config_mode = TRUE; break; default: goto refresh; } if (!config_mode) goto refresh; blank_screen(); out->info(out, "%s", "Display option change mode\n"); print_option_help(out, 'c', is_set(show, mon_show_tickets)); print_option_help(out, 'f', is_set(show, mon_show_failcounts)); print_option_help(out, 'n', is_set(options.mon_ops, mon_op_group_by_node)); print_option_help(out, 'o', is_set(show, mon_show_operations)); print_option_help(out, 'r', is_set(options.mon_ops, mon_op_inactive_resources)); print_option_help(out, 't', is_set(options.mon_ops, mon_op_print_timing)); print_option_help(out, 'A', is_set(show, mon_show_attributes)); print_option_help(out, 'L', is_set(show,mon_show_bans)); print_option_help(out, 'D', is_not_set(show, mon_show_summary)); print_option_help(out, 'R', is_set(options.mon_ops, mon_op_print_clone_detail)); print_option_help(out, 'b', is_set(options.mon_ops, mon_op_print_brief)); print_option_help(out, 'j', is_set(options.mon_ops, mon_op_print_pending)); print_option_help(out, 'm', is_set(show, mon_show_fencing_all)); out->info(out, "%s", "\nToggle fields via field letter, type any other key to return"); } refresh: mon_refresh_display(NULL); return TRUE; } #endif // Basically crm_signal_handler(SIGCHLD, SIG_IGN) plus the SA_NOCLDWAIT flag static void avoid_zombies() { struct sigaction sa; memset(&sa, 0, sizeof(struct sigaction)); if (sigemptyset(&sa.sa_mask) < 0) { crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno)); return; } sa.sa_handler = SIG_IGN; sa.sa_flags = SA_RESTART|SA_NOCLDWAIT; if (sigaction(SIGCHLD, &sa, NULL) < 0) { crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno)); } } static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; GOptionEntry extra_prog_entries[] = { { "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet), "Be less descriptive in output.", NULL }, { NULL } }; const char *description = "Notes:\n\n" "If this program is called as crm_mon.cgi, --output-as=html --html-cgi will\n" "automatically be added to the command line arguments.\n\n" "Time Specification:\n\n" "The TIMESPEC in any command line option can be specified in many different\n" "formats. It can be just an integer number of seconds, a number plus units\n" "(ms/msec/us/usec/s/sec/m/min/h/hr), or an ISO 8601 period specification.\n\n" "Output Control:\n\n" "By default, a certain list of sections are written to the output destination.\n" "The default varies based on the output format - XML includes everything, while\n" "other output formats will display less. This list can be modified with the\n" "--include and --exclude command line options. Each option may be given multiple\n" "times on the command line, and each can give a comma-separated list of sections.\n" "The options are applied to the default set, from left to right as seen on the\n" "command line. 
For a list of valid sections, pass --include=list or --exclude=list.\n\n" "Examples:\n\n" "Display the cluster status on the console with updates as they occur:\n\n" "\tcrm_mon\n\n" "Display the cluster status on the console just once then exit:\n\n" "\tcrm_mon -1\n\n" "Display your cluster status, group resources by node, and include inactive resources in the list:\n\n" "\tcrm_mon --group-by-node --inactive\n\n" "Start crm_mon as a background daemon and have it write the cluster status to an HTML file:\n\n" "\tcrm_mon --daemonize --output-as html --output-to /path/to/docroot/filename.html\n\n" "Start crm_mon and export the current cluster status as XML to stdout, then exit:\n\n" "\tcrm_mon --output-as xml\n\n"; context = pcmk__build_arg_context(args, "console (default), html, text, xml", group); pcmk__add_main_args(context, extra_prog_entries); g_option_context_set_description(context, description); pcmk__add_arg_group(context, "display", "Display Options:", "Show display options", display_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); pcmk__add_arg_group(context, "deprecated", "Deprecated Options:", "Show deprecated options", deprecated_entries); return context; } /* If certain format options were specified, we want to set some extra * options. We can just process these like they were given on the * command line. */ static void add_output_args() { GError *err = NULL; if (output_format == mon_output_plain) { if (!pcmk__force_args(context, &err, "%s --text-fancy", g_get_prgname())) { g_propagate_error(&error, err); clean_up(CRM_EX_USAGE); } } else if (output_format == mon_output_html) { if (!pcmk__force_args(context, &err, "%s --html-title \"Cluster Status\"", g_get_prgname())) { g_propagate_error(&error, err); clean_up(CRM_EX_USAGE); } } else if (output_format == mon_output_cgi) { if (!pcmk__force_args(context, &err, "%s --html-cgi --html-title \"Cluster Status\"", g_get_prgname())) { g_propagate_error(&error, err); clean_up(CRM_EX_USAGE); } } else if (output_format == mon_output_xml) { if (!pcmk__force_args(context, &err, "%s --xml-simple-list", g_get_prgname())) { g_propagate_error(&error, err); clean_up(CRM_EX_USAGE); } } else if (output_format == mon_output_legacy_xml) { output_format = mon_output_xml; if (!pcmk__force_args(context, &err, "%s --xml-legacy", g_get_prgname())) { g_propagate_error(&error, err); clean_up(CRM_EX_USAGE); } } } /* Which output format to use could come from two places: The --as-xml * style arguments we gave in deprecated_entries above, or the formatted output * arguments added by pcmk__register_formats. If the latter were used, * output_format will be mon_output_unset. * * Call the callbacks as if those older style arguments were provided so * the various things they do get done. 
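*
* The mapping between the two styles, taken from the deprecated option
* descriptions above, is roughly:
*
*     --as-html FILE     (-h)  ->  --output-as=html --output-to=FILE
*     --as-xml           (-X)  ->  --output-as=xml
*     --disable-ncurses  (-N)  ->  --output-as=text
*     --web-cgi          (-w)  ->  --output-as=html --html-cgi
*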
*/ static void reconcile_output_format(pcmk__common_args_t *args) { gboolean retval = TRUE; GError *err = NULL; if (output_format != mon_output_unset) { return; } if (safe_str_eq(args->output_ty, "html")) { char *dest = NULL; if (args->output_dest != NULL) { dest = strdup(args->output_dest); } retval = as_html_cb("h", dest, NULL, &err); free(dest); } else if (safe_str_eq(args->output_ty, "text")) { retval = no_curses_cb("N", NULL, NULL, &err); } else if (safe_str_eq(args->output_ty, "xml")) { if (args->output_ty != NULL) { free(args->output_ty); } args->output_ty = strdup("xml"); output_format = mon_output_xml; options.mon_ops |= mon_op_one_shot; } else if (is_set(options.mon_ops, mon_op_one_shot)) { if (args->output_ty != NULL) { free(args->output_ty); } args->output_ty = strdup("text"); output_format = mon_output_plain; } else { /* Neither old nor new arguments were given, so set the default. */ if (args->output_ty != NULL) { free(args->output_ty); } args->output_ty = strdup("console"); output_format = mon_output_console; } if (!retval) { g_propagate_error(&error, err); clean_up(CRM_EX_USAGE); } } int main(int argc, char **argv) { int rc = pcmk_ok; GOptionGroup *output_group = NULL; args = pcmk__new_common_args(SUMMARY); context = build_arg_context(args, &output_group); pcmk__register_formats(output_group, formats); options.pid_file = strdup("/tmp/ClusterMon.pid"); crm_log_cli_init("crm_mon"); // Avoid needing to wait for subprocesses forked for -E/--external-agent avoid_zombies(); if (pcmk__ends_with_ext(argv[0], ".cgi")) { output_format = mon_output_cgi; options.mon_ops |= mon_op_one_shot; } processed_args = pcmk__cmdline_preproc(argv, "ehimpxEILU"); fence_history_cb("--fence-history", "1", NULL, NULL); if (!g_option_context_parse_strv(context, &processed_args, &error)) { return clean_up(CRM_EX_USAGE); } for (int i = 0; i < args->verbosity; i++) { crm_bump_log_level(argc, argv); } if (!args->version) { if (args->quiet) { include_exclude_cb("--exclude", "times", NULL, NULL); } if (is_set(options.mon_ops, mon_op_watch_fencing)) { fence_history_cb("--fence-history", "0", NULL, NULL); options.mon_ops |= mon_op_fence_connect; } /* create the cib-object early to be able to do further * decisions based on the cib-source */ cib = cib_new(); if (cib == NULL) { rc = -EINVAL; } else { switch (cib->variant) { case cib_native: /* cib & fencing - everything available */ break; case cib_file: /* Don't try to connect to fencing as we * either don't have a running cluster or * the fencing-information would possibly * not match the cib data from a file. * As we don't expect cib-updates coming * in enforce one-shot. 
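*
* Summarizing the three usable CIB sources handled by this switch:
*
*     cib_native  - live cluster: CIB updates and fencing history available
*     cib_file    - snapshot, e.g. "CIB_file=/path/to/cib.xml crm_mon":
*                   no fencing connection, and one-shot mode is enforced
*     cib_remote  - remote CIB session: updates arrive, but no fencing
*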
*/ fence_history_cb("--fence-history", "0", NULL, NULL); options.mon_ops |= mon_op_one_shot; break; case cib_remote: /* updates coming in but no fencing */ fence_history_cb("--fence-history", "0", NULL, NULL); break; case cib_undefined: case cib_database: default: /* something is odd */ rc = -EINVAL; break; } } if (is_set(options.mon_ops, mon_op_one_shot)) { if (output_format == mon_output_console) { output_format = mon_output_plain; } } else if (options.daemonize) { if ((output_format == mon_output_console) || (output_format == mon_output_plain)) { output_format = mon_output_none; } crm_enable_stderr(FALSE); if ((args->output_dest == NULL || safe_str_eq(args->output_dest, "-")) && !options.external_agent) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_USAGE, "--daemonize requires at least one of --output-to and --external-agent"); return clean_up(CRM_EX_USAGE); } if (cib) { /* to be on the safe side don't have cib-object around * when we are forking */ cib_delete(cib); cib = NULL; crm_make_daemon(crm_system_name, TRUE, options.pid_file); cib = cib_new(); if (cib == NULL) { rc = -EINVAL; } /* otherwise assume we've got the same cib-object we've just destroyed * in our parent */ } } else if (output_format == mon_output_console) { #if CURSES_ENABLED crm_enable_stderr(FALSE); #else options.mon_ops |= mon_op_one_shot; output_format = mon_output_plain; printf("Defaulting to one-shot mode\n"); printf("You need to have curses available at compile time to enable console mode\n"); #endif } } if (rc != pcmk_ok) { // Shouldn't really be possible g_set_error(&error, G_OPTION_ERROR, CRM_EX_ERROR, "Invalid CIB source"); return clean_up(CRM_EX_ERROR); } reconcile_output_format(args); add_output_args(); if (args->version && output_format == mon_output_console) { /* Use the text output format here if we are in curses mode but were given * --version. Displaying version information uses printf, and then we * immediately exit. We don't want to initialize curses for that. */ rc = pcmk__output_new(&out, "text", args->output_dest, argv); } else { rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); } if (rc != pcmk_rc_ok) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_ERROR, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); return clean_up(CRM_EX_ERROR); } /* output_format MUST NOT BE CHANGED AFTER THIS POINT. */ /* Apply --include/--exclude flags we used internally. There's no error reporting * here because this would be a programming error. */ apply_include_exclude(options.includes_excludes, output_format, &error); /* And now apply any --include/--exclude flags the user gave on the command line. * These are done in a separate pass from the internal ones because we want to * make sure whatever the user specifies overrides whatever we do. 
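*
* For example, starting from the text-format defaults, a command line of
*
*     crm_mon --include=all --exclude=fencing,tickets --include=bans:ban-
*
* first widens the output to every section, then drops the fencing and ticket
* sections, and finally re-enables bans limited to constraint IDs starting
* with "ban-", because each flag is applied left to right on top of whatever
* the previous ones produced.
*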
*/ if (!apply_include_exclude(options.user_includes_excludes, output_format, &error)) { return clean_up(CRM_EX_USAGE); } crm_mon_register_messages(out); pe__register_messages(out); stonith__register_messages(out); if (args->version) { out->version(out, false); return clean_up(CRM_EX_OK); } /* Extra sanity checks when in CGI mode */ if (output_format == mon_output_cgi) { if (cib && cib->variant == cib_file) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_USAGE, "CGI mode used with CIB file"); return clean_up(CRM_EX_USAGE); } else if (options.external_agent != NULL) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_USAGE, "CGI mode cannot be used with --external-agent"); return clean_up(CRM_EX_USAGE); } else if (options.daemonize == TRUE) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_USAGE, "CGI mode cannot be used with -d"); return clean_up(CRM_EX_USAGE); } } if (output_format == mon_output_xml || output_format == mon_output_legacy_xml) { options.mon_ops |= mon_op_print_timing; } crm_info("Starting %s", crm_system_name); if (cib) { do { if (is_not_set(options.mon_ops, mon_op_one_shot)) { print_as(output_format ,"Waiting until cluster is available on this node ...\n"); } rc = cib_connect(is_not_set(options.mon_ops, mon_op_one_shot)); if (is_set(options.mon_ops, mon_op_one_shot)) { break; } else if (rc != pcmk_ok) { sleep(options.reconnect_msec / 1000); #if CURSES_ENABLED if (output_format == mon_output_console) { clear(); refresh(); } #endif } else { if (output_format == mon_output_html && out->dest != stdout) { printf("Writing html to %s ...\n", args->output_dest); } } } while (rc == -ENOTCONN); } if (rc != pcmk_ok) { if (output_format == mon_output_monitor) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_ERROR, "CLUSTER CRIT: Connection to cluster failed: %s", pcmk_strerror(rc)); return clean_up(MON_STATUS_CRIT); } else { if (rc == -ENOTCONN) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_ERROR, "\nError: cluster is not available on this node"); } else { g_set_error(&error, G_OPTION_ERROR, CRM_EX_ERROR, "\nConnection to cluster failed: %s", pcmk_strerror(rc)); } } if (output_format == mon_output_console) { sleep(2); } return clean_up(crm_errno2exit(rc)); } if (is_set(options.mon_ops, mon_op_one_shot)) { return clean_up(CRM_EX_OK); } mainloop = g_main_loop_new(NULL, FALSE); mainloop_add_signal(SIGTERM, mon_shutdown); mainloop_add_signal(SIGINT, mon_shutdown); #if CURSES_ENABLED if (output_format == mon_output_console) { ncurses_winch_handler = crm_signal_handler(SIGWINCH, mon_winresize); if (ncurses_winch_handler == SIG_DFL || ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR) ncurses_winch_handler = NULL; io_channel = g_io_channel_unix_new(STDIN_FILENO); g_io_add_watch(io_channel, G_IO_IN, detect_user_input, NULL); } #endif refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL); g_main_loop_run(mainloop); g_main_loop_unref(mainloop); if (io_channel != NULL) { g_io_channel_shutdown(io_channel, TRUE, NULL); } crm_info("Exiting %s", crm_system_name); return clean_up(CRM_EX_OK); } /*! 
* \internal * \brief Print one-line status suitable for use with monitoring software * * \param[in] data_set Working set of CIB state * \param[in] history List of stonith actions * * \note This function's output (and the return code when the program exits) * should conform to https://www.monitoring-plugins.org/doc/guidelines.html */ static void print_simple_status(pcmk__output_t *out, pe_working_set_t * data_set, stonith_history_t *history, unsigned int mon_ops) { GListPtr gIter = NULL; int nodes_online = 0; int nodes_standby = 0; int nodes_maintenance = 0; char *offline_nodes = NULL; gboolean no_dc = FALSE; gboolean offline = FALSE; if (data_set->dc_node == NULL) { mon_ops |= mon_op_has_warnings; no_dc = TRUE; } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; if (node->details->standby && node->details->online) { nodes_standby++; } else if (node->details->maintenance && node->details->online) { nodes_maintenance++; } else if (node->details->online) { nodes_online++; } else { char *s = crm_strdup_printf("offline node: %s", node->details->uname); offline_nodes = pcmk__add_word(offline_nodes, s); free(s); mon_ops |= mon_op_has_warnings; offline = TRUE; } } if (is_set(mon_ops, mon_op_has_warnings)) { out->info(out, "CLUSTER WARN:%s%s%s", no_dc ? " No DC" : "", no_dc && offline ? "," : "", offline ? offline_nodes : ""); free(offline_nodes); } else { char *nodes_standby_s = NULL; char *nodes_maint_s = NULL; if (nodes_standby > 0) { nodes_standby_s = crm_strdup_printf(", %d standby node%s", nodes_standby, pcmk__plural_s(nodes_standby)); } if (nodes_maintenance > 0) { nodes_maint_s = crm_strdup_printf(", %d maintenance node%s", nodes_maintenance, pcmk__plural_s(nodes_maintenance)); } out->info(out, "CLUSTER OK: %d node%s online%s%s, " "%d resource instance%s configured", nodes_online, pcmk__plural_s(nodes_online), nodes_standby_s != NULL ? nodes_standby_s : "", nodes_maint_s != NULL ? nodes_maint_s : "", data_set->ninstances, pcmk__plural_s(data_set->ninstances)); free(nodes_standby_s); free(nodes_maint_s); } } /*! 
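* As a concrete illustration of the reduction described below: given a history
* with two completed reboots of node1, one failed "off" of node2, and a
* still-pending reboot of node3, the reduced list keeps a single completed
* reboot entry for node1, keeps the failed entry for node2 (failures are kept
* per delegate, so a second failure executed by a different node would also
* survive), and keeps the pending entry for node3, since in-progress actions
* are never dropped.
*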
* \internal * \brief Reduce the stonith-history * for successful actions we keep the last of every action-type & target * for failed actions we record as well who had failed * for actions in progress we keep full track * * \param[in] history List of stonith actions * */ static stonith_history_t * reduce_stonith_history(stonith_history_t *history) { stonith_history_t *new = history, *hp, *np; if (new) { hp = new->next; new->next = NULL; while (hp) { stonith_history_t *hp_next = hp->next; hp->next = NULL; for (np = new; ; np = np->next) { if ((hp->state == st_done) || (hp->state == st_failed)) { /* action not in progress */ if (safe_str_eq(hp->target, np->target) && safe_str_eq(hp->action, np->action) && (hp->state == np->state) && ((hp->state == st_done) || safe_str_eq(hp->delegate, np->delegate))) { /* purge older hp */ stonith_history_free(hp); break; } } if (!np->next) { np->next = hp; break; } } hp = hp_next; } } return new; } static int send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc) { pid_t pid; /*setenv needs chars, these are ints */ char *rc_s = crm_itoa(rc); char *status_s = crm_itoa(status); char *target_rc_s = crm_itoa(target_rc); crm_debug("Sending external notification to '%s' via '%s'", options.external_recipient, options.external_agent); if(rsc) { setenv("CRM_notify_rsc", rsc, 1); } if (options.external_recipient) { setenv("CRM_notify_recipient", options.external_recipient, 1); } setenv("CRM_notify_node", node, 1); setenv("CRM_notify_task", task, 1); setenv("CRM_notify_desc", desc, 1); setenv("CRM_notify_rc", rc_s, 1); setenv("CRM_notify_target_rc", target_rc_s, 1); setenv("CRM_notify_status", status_s, 1); pid = fork(); if (pid == -1) { crm_perror(LOG_ERR, "notification fork() failed."); } if (pid == 0) { /* crm_debug("notification: I am the child. Executing the nofitication program."); */ execl(options.external_agent, options.external_agent, NULL); exit(CRM_EX_ERROR); } crm_trace("Finished running custom notification program '%s'.", options.external_agent); free(target_rc_s); free(status_s); free(rc_s); return 0; } static void handle_rsc_op(xmlNode * xml, const char *node_id) { int rc = -1; int status = -1; int target_rc = -1; gboolean notify = TRUE; char *rsc = NULL; char *task = NULL; const char *desc = NULL; const char *magic = NULL; const char *id = NULL; const char *node = NULL; xmlNode *n = xml; xmlNode * rsc_op = xml; if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) { xmlNode *cIter; for(cIter = xml->children; cIter; cIter = cIter->next) { handle_rsc_op(cIter, node_id); } return; } id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY); if (id == NULL) { /* Compatibility with <= 1.1.5 */ id = ID(rsc_op); } magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC); if (magic == NULL) { /* non-change */ return; } if (!decode_transition_magic(magic, NULL, NULL, NULL, &status, &rc, &target_rc)) { crm_err("Invalid event %s detected for %s", magic, id); return; } if (parse_op_key(id, &rsc, &task, NULL) == FALSE) { crm_err("Invalid event detected for %s", id); goto bail; } node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET); while (n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) { n = n->parent; } if(node == NULL && n) { node = crm_element_value(n, XML_ATTR_UNAME); } if (node == NULL && n) { node = ID(n); } if (node == NULL) { node = node_id; } if (node == NULL) { crm_err("No node detected for event %s (%s)", magic, id); goto bail; } /* look up where we expected it to be? 
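*
* (When --external-agent is configured, the event details assembled in this
* function are handed to that program through the CRM_notify_* environment
* variables set in send_custom_trap() above, not through command-line
* arguments. A minimal agent, written here in C purely as an illustrative
* sketch, could be:
*
*     #include <stdio.h>
*     #include <stdlib.h>
*
*     int main(void)
*     {
*         const char *node = getenv("CRM_notify_node");
*         const char *task = getenv("CRM_notify_task");
*         const char *rsc  = getenv("CRM_notify_rsc");
*         const char *desc = getenv("CRM_notify_desc");
*
*         fprintf(stderr, "%s of %s on %s: %s\n",
*                 task ? task : "?", rsc ? rsc : "?",
*                 node ? node : "?", desc ? desc : "?");
*         return 0;
*     }
*
* The recipient, return code, target return code, and status are available the
* same way via CRM_notify_recipient, CRM_notify_rc, CRM_notify_target_rc, and
* CRM_notify_status.)
*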
*/ desc = pcmk_strerror(pcmk_ok); if (status == PCMK_LRM_OP_DONE && target_rc == rc) { crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc); if (rc == PCMK_OCF_NOT_RUNNING) { notify = FALSE; } } else if (status == PCMK_LRM_OP_DONE) { desc = services_ocf_exitcode_str(rc); crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc); } else { desc = services_lrm_status_str(status); crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc); } if (notify && options.external_agent) { send_custom_trap(node, rsc, task, target_rc, rc, status, desc); } bail: free(rsc); free(task); } static gboolean mon_trigger_refresh(gpointer user_data) { mainloop_set_trigger(refresh_trigger); return FALSE; } #define NODE_PATT "/lrm[@id=" static char * get_node_from_xpath(const char *xpath) { char *nodeid = NULL; char *tmp = strstr(xpath, NODE_PATT); if(tmp) { tmp += strlen(NODE_PATT); tmp += 1; nodeid = strdup(tmp); tmp = strstr(nodeid, "\'"); CRM_ASSERT(tmp); tmp[0] = 0; } return nodeid; } static void crm_diff_update_v2(const char *event, xmlNode * msg) { xmlNode *change = NULL; xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) { const char *name = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); xmlNode *match = NULL; const char *node = NULL; if(op == NULL) { continue; } else if(strcmp(op, "create") == 0) { match = change->children; } else if(strcmp(op, "move") == 0) { continue; } else if(strcmp(op, "delete") == 0) { continue; } else if(strcmp(op, "modify") == 0) { match = first_named_child(change, XML_DIFF_RESULT); if(match) { match = match->children; } } if(match) { name = (const char *)match->name; } crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name); if(xpath == NULL) { /* Version field, ignore */ } else if(name == NULL) { crm_debug("No result for %s operation to %s", op, xpath); CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0); } else if(strcmp(name, XML_TAG_CIB) == 0) { xmlNode *state = NULL; xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS); for (state = __xml_first_child_element(status); state != NULL; state = __xml_next_element(state)) { node = crm_element_value(state, XML_ATTR_UNAME); if (node == NULL) { node = ID(state); } handle_rsc_op(state, node); } } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) { xmlNode *state = NULL; for (state = __xml_first_child_element(match); state != NULL; state = __xml_next_element(state)) { node = crm_element_value(state, XML_ATTR_UNAME); if (node == NULL) { node = ID(state); } handle_rsc_op(state, node); } } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) { node = crm_element_value(match, XML_ATTR_UNAME); if (node == NULL) { node = ID(match); } handle_rsc_op(match, node); } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) { node = ID(match); handle_rsc_op(match, node); } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { char *local_node = get_node_from_xpath(xpath); handle_rsc_op(match, local_node); free(local_node); } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { char *local_node = get_node_from_xpath(xpath); handle_rsc_op(match, local_node); free(local_node); } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { char *local_node = get_node_from_xpath(xpath); handle_rsc_op(match, local_node); free(local_node); } else { crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name); } } } static void crm_diff_update_v1(const char 
*event, xmlNode * msg) { /* Process operation updates */ xmlXPathObject *xpathObj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RSC_OP); int lpc = 0, max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); handle_rsc_op(rsc_op, NULL); } freeXpathObject(xpathObj); } static void crm_diff_update(const char *event, xmlNode * msg) { int rc = -1; static bool stale = FALSE; gboolean cib_updated = FALSE; xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); print_dot(output_format); if (current_cib != NULL) { rc = xml_apply_patchset(current_cib, diff, TRUE); switch (rc) { case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(current_cib); current_cib = NULL; break; case pcmk_ok: cib_updated = TRUE; break; default: crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(current_cib); current_cib = NULL; } } if (current_cib == NULL) { crm_trace("Re-requesting the full cib"); cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); } if (options.external_agent) { int format = 0; crm_element_value_int(diff, "format", &format); switch(format) { case 1: crm_diff_update_v1(event, msg); break; case 2: crm_diff_update_v2(event, msg); break; default: crm_err("Unknown patch format: %d", format); } } if (current_cib == NULL) { if(!stale) { print_as(output_format, "--- Stale data ---"); } stale = TRUE; return; } stale = FALSE; kick_refresh(cib_updated); } static gboolean mon_refresh_display(gpointer user_data) { xmlNode *cib_copy = copy_xml(current_cib); stonith_history_t *stonith_history = NULL; int history_rc = 0; last_refresh = time(NULL); if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) { if (cib) { cib->cmds->signoff(cib); } out->err(out, "Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation)); if (output_format == mon_output_console) { sleep(2); } clean_up(CRM_EX_CONFIG); return FALSE; } /* get the stonith-history if there is evidence we need it */ while (is_set(options.mon_ops, mon_op_fence_history)) { if (st != NULL) { history_rc = st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120); if (history_rc != 0) { out->err(out, "Critical: Unable to get stonith-history"); mon_cib_connection_destroy(NULL); } else { stonith_history = stonith__sort_history(stonith_history); if (is_not_set(options.mon_ops, mon_op_fence_full_history) && output_format != mon_output_xml) { stonith_history = reduce_stonith_history(stonith_history); } break; /* all other cases are errors */ } } else { out->err(out, "Critical: No stonith-API"); } free_xml(cib_copy); out->err(out, "Reading stonith-history failed"); if (output_format == mon_output_console) { sleep(2); } return FALSE; } if (mon_data_set == NULL) { mon_data_set = pe_new_working_set(); CRM_ASSERT(mon_data_set != NULL); } set_bit(mon_data_set->flags, pe_flag_no_compat); mon_data_set->input = cib_copy; cluster_status(mon_data_set); /* Unpack constraints if any section will need them * (tickets may be referenced in constraints but not granted yet, * and bans need negative location constraints) */ if (is_set(show, mon_show_bans) || is_set(show, mon_show_tickets)) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, mon_data_set->input); unpack_constraints(cib_constraints, mon_data_set); } switch (output_format) { case mon_output_html: case mon_output_cgi: if (print_html_status(out, mon_data_set, stonith_history, 
options.mon_ops, show, options.neg_location_prefix) != 0) { g_set_error(&error, G_OPTION_ERROR, CRM_EX_CANTCREAT, "Critical: Unable to output html file"); clean_up(CRM_EX_CANTCREAT); return FALSE; } break; case mon_output_legacy_xml: case mon_output_xml: print_xml_status(out, mon_data_set, crm_errno2exit(history_rc), stonith_history, options.mon_ops, show, options.neg_location_prefix); break; case mon_output_monitor: print_simple_status(out, mon_data_set, stonith_history, options.mon_ops); if (is_set(options.mon_ops, mon_op_has_warnings)) { clean_up(MON_STATUS_WARN); return FALSE; } break; case mon_output_console: /* If curses is not enabled, this will just fall through to the plain * text case. */ #if CURSES_ENABLED blank_screen(); print_status(out, mon_data_set, stonith_history, options.mon_ops, show, options.neg_location_prefix); refresh(); break; #endif case mon_output_plain: print_status(out, mon_data_set, stonith_history, options.mon_ops, show, options.neg_location_prefix); break; case mon_output_unset: case mon_output_none: break; } stonith_history_free(stonith_history); stonith_history = NULL; pe_reset_working_set(mon_data_set); return TRUE; } static void mon_st_callback_event(stonith_t * st, stonith_event_t * e) { if (st->state == stonith_disconnected) { /* disconnect cib as well and have everything reconnect */ mon_cib_connection_destroy(NULL); } else if (options.external_agent) { char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)", e->operation, e->origin, e->target, pcmk_strerror(e->result), e->id); send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc); free(desc); } } static void kick_refresh(gboolean data_updated) { static int updates = 0; time_t now = time(NULL); if (data_updated) { updates++; } if(refresh_timer == NULL) { refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL); } /* Refresh * - immediately if the last update was more than 5s ago * - every 10 cib-updates * - at most 2s after the last update */ if ((now - last_refresh) > (options.reconnect_msec / 1000)) { mainloop_set_trigger(refresh_trigger); mainloop_timer_stop(refresh_timer); updates = 0; } else if(updates >= 10) { mainloop_set_trigger(refresh_trigger); mainloop_timer_stop(refresh_timer); updates = 0; } else { mainloop_timer_start(refresh_timer); } } static void mon_st_callback_display(stonith_t * st, stonith_event_t * e) { if (st->state == stonith_disconnected) { /* disconnect cib as well and have everything reconnect */ mon_cib_connection_destroy(NULL); } else { print_dot(output_format); kick_refresh(TRUE); } } static void clean_up_connections(void) { if (cib != NULL) { cib->cmds->signoff(cib); cib_delete(cib); cib = NULL; } if (st != NULL) { if (st->state != stonith_disconnected) { st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); st->cmds->disconnect(st); } stonith_api_delete(st); st = NULL; } } static void handle_html_output(crm_exit_t exit_code) { xmlNodePtr html = NULL; pcmk__html_add_header(html, "meta", "http-equiv", "refresh", "content", crm_itoa(options.reconnect_msec/1000), NULL); out->finish(out, exit_code, true, (void **) &html); } /* * De-init ncurses, disconnect from the CIB manager, disconnect fencing, * deallocate memory and show usage-message if requested. 
* * We don't actually return, but nominally returning crm_exit_t allows a usage * like "return clean_up(exit_code);" which helps static analysis understand the * code flow. */ static crm_exit_t clean_up(crm_exit_t exit_code) { /* Quitting crm_mon is much more complicated than it ought to be. */ /* (1) Close connections, free things, etc. */ clean_up_connections(); free(options.pid_file); free(options.neg_location_prefix); g_slist_free_full(options.includes_excludes, free); pe_free_working_set(mon_data_set); mon_data_set = NULL; g_strfreev(processed_args); /* (2) If this is abnormal termination and we're in curses mode, shut down * curses first. Any messages displayed to the screen before curses is shut * down will be lost because doing the shut down will also restore the * screen to whatever it looked like before crm_mon was started. */ if ((error != NULL || exit_code == CRM_EX_USAGE) && output_format == mon_output_console) { out->finish(out, exit_code, false, NULL); pcmk__output_free(out); out = NULL; } /* (3) If this is a command line usage related failure, print the usage * message. */ if (exit_code == CRM_EX_USAGE && (output_format == mon_output_console || output_format == mon_output_plain)) { char *help = g_option_context_get_help(context, TRUE, NULL); fprintf(stderr, "%s", help); free(help); } pcmk__free_arg_context(context); /* (4) If this is any kind of error, print the error out and exit. Make * sure to handle situations both before and after formatted output is * set up. We want errors to appear formatted if at all possible. */ if (error != NULL) { if (out != NULL) { out->err(out, "%s: %s\n", g_get_prgname(), error->message); out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } else { fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message); } g_clear_error(&error); crm_exit(exit_code); } /* (5) Print formatted output to the screen if we made it far enough in * crm_mon to be able to do so. */ if (out != NULL) { switch (output_format) { case mon_output_cgi: case mon_output_html: handle_html_output(exit_code); break; default: out->finish(out, exit_code, true, NULL); break; } pcmk__output_free(out); pcmk__unregister_formats(); } crm_exit(exit_code); } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 7e10739496..8691575f21 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,1558 +1,1565 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include bool BE_QUIET = FALSE; bool scope_master = FALSE; int cib_options = cib_sync_call; static GMainLoop *mainloop = NULL; // Things that should be cleaned up on exit static cib_t *cib_conn = NULL; static pcmk_controld_api_t *controld_api = NULL; static pe_working_set_t *data_set = NULL; #define MESSAGE_TIMEOUT_S 60 // Clean up and exit -static void +static crm_exit_t bye(crm_exit_t exit_code) { - static bool crm_resourece_shutdown_flag = FALSE; + static bool crm_resource_shutdown_flag = false; - if (crm_resourece_shutdown_flag) { - return; + if (crm_resource_shutdown_flag) { + // Allow usage like "return bye(...);" + return exit_code; } - crm_resourece_shutdown_flag = TRUE; + crm_resource_shutdown_flag = true; if (cib_conn != NULL) { - cib_conn->cmds->signoff(cib_conn); - cib_delete(cib_conn); + cib_t *save_cib_conn = cib_conn; + cib_conn = NULL; + save_cib_conn->cmds->signoff(save_cib_conn); + cib_delete(save_cib_conn); } if (controld_api != NULL) { - pcmk_free_controld_api(controld_api); + pcmk_controld_api_t *save_controld_api = controld_api; + + controld_api = NULL; + pcmk_free_controld_api(save_controld_api); } pe_free_working_set(data_set); data_set = NULL; crm_exit(exit_code); - return; + return exit_code; } static gboolean resource_ipc_timeout(gpointer data) { - fprintf(stderr, "Aborting because no messages received in %d seconds\n", + // Start with newline because "Waiting for ..." message doesn't have one + fprintf(stderr, "\nAborting because no messages received in %d seconds\n", MESSAGE_TIMEOUT_S); crm_err("No messages received in %d seconds", MESSAGE_TIMEOUT_S); bye(CRM_EX_TIMEOUT); return FALSE; } static void handle_controller_reply(pcmk_controld_api_t *capi, void *api_data, void *user_data) { fprintf(stderr, "."); if ((capi->replies_expected(capi) == 0) && mainloop && g_main_loop_is_running(mainloop)) { fprintf(stderr, " OK\n"); crm_debug("Got all the replies we expected"); bye(CRM_EX_OK); } } static void handle_controller_drop(pcmk_controld_api_t *capi, void *api_data, void *user_data) { crm_info("Connection to controller was terminated"); bye(CRM_EX_DISCONNECT); } static void start_mainloop(pcmk_controld_api_t *capi) { if (capi->replies_expected(capi) > 0) { unsigned int count = capi->replies_expected(capi); fprintf(stderr, "Waiting for %d %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); mainloop = g_main_loop_new(NULL, FALSE); g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } } static int compare_id(gconstpointer a, gconstpointer b) { return strcmp((const char *)a, (const char *)b); } static GListPtr build_constraint_list(xmlNode *root) { GListPtr retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObjectPtr xpathObj = NULL; int ndx = 0; cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root); xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION); for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) { xmlNode *match = getXpathResult(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id); } freeXpathObject(xpathObj); return retval; } /* short option letters still available: eEJkKXyYZ */ static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\t\tDisplay this text and exit", pcmk__option_default }, { "version", no_argument, 
NULL, '$', "\t\tDisplay version information and exit", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\t\tIncrease debug output (may be specified multiple times)", pcmk__option_default }, { "quiet", no_argument, NULL, 'Q', "\t\tBe less descriptive in results", pcmk__option_default }, { "resource", required_argument, NULL, 'r', "\tResource ID", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nQueries:", pcmk__option_default }, { "list", no_argument, NULL, 'L', "\t\tList all cluster resources with status", pcmk__option_default }, { "list-raw", no_argument, NULL, 'l', "\t\tList IDs of all instantiated resources (individual members rather " "than groups etc.)", pcmk__option_default }, { "list-cts", no_argument, NULL, 'c', NULL, pcmk__option_hidden }, { "list-operations", no_argument, NULL, 'O', "\tList active resource operations, optionally filtered by --resource " "and/or --node", pcmk__option_default }, { "list-all-operations", no_argument, NULL, 'o', "List all resource operations, optionally filtered by --resource " "and/or --node", pcmk__option_default }, { "list-standards", no_argument, NULL, 0, "\tList supported standards", pcmk__option_default }, { "list-ocf-providers", no_argument, NULL, 0, "List all available OCF providers", pcmk__option_default }, { "list-agents", required_argument, NULL, 0, "List all agents available for the named standard and/or provider", pcmk__option_default }, { "list-ocf-alternatives", required_argument, NULL, 0, "List all available providers for the named OCF agent", pcmk__option_default }, { "show-metadata", required_argument, NULL, 0, "Show the metadata for the named class:provider:agent", pcmk__option_default }, { "query-xml", no_argument, NULL, 'q', "\tShow XML configuration of resource (after any template expansion)", pcmk__option_default }, { "query-xml-raw", no_argument, NULL, 'w', "\tShow XML configuration of resource (before any template expansion)", pcmk__option_default }, { "get-parameter", required_argument, NULL, 'g', "Display named parameter for resource (use instance attribute unless " "--meta or --utilization is specified)", pcmk__option_default }, { "get-property", required_argument, NULL, 'G', "Display named property of resource ('class', 'type', or 'provider') " "(requires --resource)", pcmk__option_hidden }, { "locate", no_argument, NULL, 'W', "\t\tShow node(s) currently running resource", pcmk__option_default }, { "stack", no_argument, NULL, 'A', "\t\tDisplay the prerequisites and dependents of a resource", pcmk__option_default }, { "constraints", no_argument, NULL, 'a', "\tDisplay the (co)location constraints that apply to a resource", pcmk__option_default }, { "why", no_argument, NULL, 'Y', "\t\tShow why resources are not running, optionally filtered by " "--resource and/or --node", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nCommands:", pcmk__option_default }, { "validate", no_argument, NULL, 0, "\t\tValidate resource configuration by calling agent's validate-all " "action. The configuration may be specified either by giving an " "existing resource name with -r, or by specifying --class, " "--agent, and --provider arguments, along with any number of " "--option arguments.", pcmk__option_default }, { "cleanup", no_argument, NULL, 'C', "\t\tIf resource has any past failures, clear its history and " "fail count. Optionally filtered by --resource, --node, " "--operation, and --interval (otherwise all). 
--operation and " "--interval apply to fail counts, but entire history is always " "cleared, to allow current state to be rechecked. If the named " "resource is part of a group, or one numbered instance of a clone " "or bundled resource, the clean-up applies to the whole collective " "resource unless --force is given.", pcmk__option_default }, { "refresh", no_argument, NULL, 'R', "\t\tDelete resource's history (including failures) so its current " "state is rechecked. Optionally filtered by --resource and --node " "(otherwise all). If the named resource is part of a group, or one " "numbered instance of a clone or bundled resource, the clean-up " "applies to the whole collective resource unless --force is given.", pcmk__option_default }, { "set-parameter", required_argument, NULL, 'p', "Set named parameter for resource (requires -v). Use instance " "attribute unless --meta or --utilization is specified.", pcmk__option_default }, { "delete-parameter", required_argument, NULL, 'd', "Delete named parameter for resource. Use instance attribute unless " "--meta or --utilization is specified.", pcmk__option_default }, { "set-property", required_argument, NULL, 'S', "Set named property of resource ('class', 'type', or 'provider') " "(requires -r, -t, -v)", pcmk__option_hidden }, { "-spacer-", no_argument, NULL, '-', "\nResource location:", pcmk__option_default }, { "move", no_argument, NULL, 'M', "\t\tCreate a constraint to move resource. If --node is specified, the " "constraint will be to move to that node, otherwise it will be to " "ban the current node. Unless --force is specified, this will " "return an error if the resource is already running on the " "specified node. If --force is specified, this will always ban the " "current node. Optional: --lifetime, --master. NOTE: This may " "prevent the resource from running on its previous location until " "the implicit constraint expires or is removed with --clear.", pcmk__option_default }, { "ban", no_argument, NULL, 'B', "\t\tCreate a constraint to keep resource off a node. Optional: " "--node, --lifetime, --master. NOTE: This will prevent the " "resource from running on the affected node until the implicit " "constraint expires or is removed with --clear. If --node is not " "specified, it defaults to the node currently running the resource " "for primitives and groups, or the master for promotable clones " "with promoted-max=1 (all other situations result in an error as " "there is no sane default).", pcmk__option_default }, { "clear", no_argument, NULL, 'U', "\t\tRemove all constraints created by the --ban and/or --move " "commands. Requires: --resource. Optional: --node, --master, " "--expired. If --node is not specified, all constraints created " "by --ban and --move will be removed for the named resource. If " "--node and --force are specified, any constraint created by " "--move will be cleared, even if it is not for the specified node. " "If --expired is specified, only those constraints whose lifetimes " "have expired will be removed.", pcmk__option_default }, { "expired", no_argument, NULL, 'e', "\t\tModifies the --clear argument to remove constraints with " "expired lifetimes.", pcmk__option_default }, { "lifetime", required_argument, NULL, 'u', "\tLifespan (as ISO 8601 duration) of created constraints (with -B, " "-M) (see https://en.wikipedia.org/wiki/ISO_8601#Durations)", pcmk__option_default }, { "master", no_argument, NULL, 0, "\t\tLimit scope of command to Master role (with -B, -M, -U). 
For -B " "and -M, the previous master may remain active in the Slave role.", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:", pcmk__option_default }, { "delete", no_argument, NULL, 'D', "\t\t(Advanced) Delete a resource from the CIB. Required: -t", pcmk__option_default }, { "fail", no_argument, NULL, 'F', "\t\t(Advanced) Tell the cluster this resource has failed", pcmk__option_default }, { "restart", no_argument, NULL, 0, "\t\t(Advanced) Tell the cluster to restart this resource and " "anything that depends on it", pcmk__option_default }, { "wait", no_argument, NULL, 0, "\t\t(Advanced) Wait until the cluster settles into a stable state", pcmk__option_default }, { "force-demote", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and demote a resource on the local " "node. Unless --force is specified, this will refuse to do so if " "the cluster believes the resource is a clone instance already " "running on the local node.", pcmk__option_default }, { "force-stop", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and stop a resource on the local node", pcmk__option_default }, { "force-start", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and start a resource on the local " "node. Unless --force is specified, this will refuse to do so if " "the cluster believes the resource is a clone instance already " "running on the local node.", pcmk__option_default }, { "force-promote", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and promote a resource on the local " "node. Unless --force is specified, this will refuse to do so if " "the cluster believes the resource is a clone instance already " "running on the local node.", pcmk__option_default }, { "force-check", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and check the state of a resource on " "the local node", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nValidate Options:", pcmk__option_default }, { "class", required_argument, NULL, 0, "\tThe standard the resource agent confirms to (for example, ocf). " "Use with --agent, --provider, --option, and --validate.", pcmk__option_default }, { "agent", required_argument, NULL, 0, "\tThe agent to use (for example, IPaddr). Use with --class, " "--provider, --option, and --validate.", pcmk__option_default }, { "provider", required_argument, NULL, 0, "\tThe vendor that supplies the resource agent (for example, " "heartbeat). Use with --class, --agent, --option, and --validate.", pcmk__option_default }, { "option", required_argument, NULL, 0, "\tSpecify a device configuration parameter as NAME=VALUE (may be " "specified multiple times). Use with --validate and without the " "-r option.", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:", pcmk__option_default }, { "node", required_argument, NULL, 'N', "\tNode name", pcmk__option_default }, { "recursive", no_argument, NULL, 0, "\tFollow colocation chains when using --set-parameter", pcmk__option_default }, { "resource-type", required_argument, NULL, 't', "Resource XML element (primitive, group, etc.) 
(with -D)", pcmk__option_default }, { "parameter-value", required_argument, NULL, 'v', "Value to use with -p", pcmk__option_default }, { "meta", no_argument, NULL, 'm', "\t\tUse resource meta-attribute instead of instance attribute " "(with -p, -g, -d)", pcmk__option_default }, { "utilization", no_argument, NULL, 'z', "\tUse resource utilization attribute instead of instance attribute " "(with -p, -g, -d)", pcmk__option_default }, { "operation", required_argument, NULL, 'n', "\tOperation to clear instead of all (with -C -r)", pcmk__option_default }, { "interval", required_argument, NULL, 'I', "\tInterval of operation to clear (default 0) (with -C -r -n)", pcmk__option_default }, { "set-name", required_argument, NULL, 's', "\t(Advanced) XML ID of attributes element to use (with -p, -d)", pcmk__option_default }, { "nvpair", required_argument, NULL, 'i', "\t(Advanced) XML ID of nvpair element to use (with -p, -d)", pcmk__option_default }, { "timeout", required_argument, NULL, 'T', "\t(Advanced) Abort if command does not finish in this time (with " "--restart, --wait, --force-*)", pcmk__option_default }, { "force", no_argument, NULL, 'f', "\t\tIf making CIB changes, do so regardless of quorum. See help for " "individual commands for additional behavior.", pcmk__option_default }, { "xml-file", required_argument, NULL, 'x', NULL, pcmk__option_hidden }, /* legacy options */ { "host-uname", required_argument, NULL, 'H', NULL, pcmk__option_hidden }, { "-spacer-", 1, NULL, '-', "\nExamples:", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', "List the available OCF agents:", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf", pcmk__option_example }, { "-spacer-", 1, NULL, '-', "List the available OCF agents from the linux-ha project:", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf:heartbeat", pcmk__option_example }, { "-spacer-", 1, NULL, '-', "Move 'myResource' to a specific node:", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --resource myResource --move --node altNode", pcmk__option_example }, { "-spacer-", 1, NULL, '-', "Allow (but not force) 'myResource' to move back to its original " "location:", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --resource myResource --clear", pcmk__option_example }, { "-spacer-", 1, NULL, '-', "Stop 'myResource' (and anything that depends on it):", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter target-role " "--meta --parameter-value Stopped", pcmk__option_example }, { "-spacer-", 1, NULL, '-', "Tell the cluster not to manage 'myResource' (the cluster will not " "attempt to start or stop the resource under any circumstances; " "useful when performing maintenance tasks on a resource):", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter is-managed " "--meta --parameter-value false", pcmk__option_example }, { "-spacer-", 1, NULL, '-', "Erase the operation history of 'myResource' on 'aNode' (the cluster " "will 'forget' the existing resource state, including any " "errors, and attempt to recover the resource; useful when a " "resource had failed permanently and has been repaired " "by an administrator):", pcmk__option_paragraph }, { "-spacer-", 1, NULL, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk__option_example }, { 0, 0, 0, 0 } }; int main(int argc, char **argv) { char rsc_cmd = 'L'; const char 
*v_class = NULL; const char *v_agent = NULL; const char *v_provider = NULL; char *name = NULL; char *value = NULL; GHashTable *validate_options = NULL; const char *rsc_id = NULL; const char *host_uname = NULL; const char *prop_name = NULL; const char *prop_value = NULL; const char *rsc_type = NULL; const char *prop_id = NULL; const char *prop_set = NULL; const char *rsc_long_cmd = NULL; const char *longname = NULL; const char *operation = NULL; const char *interval_spec = NULL; const char *cib_file = getenv("CIB_file"); GHashTable *override_params = NULL; char *xml_file = NULL; xmlNode *cib_xml_copy = NULL; pe_resource_t *rsc = NULL; bool recursive = FALSE; bool validate_cmdline = FALSE; /* whether we are just validating based on command line options */ bool require_resource = TRUE; /* whether command requires that resource be specified */ bool require_dataset = TRUE; /* whether command requires populated dataset instance */ bool require_crmd = FALSE; // whether command requires controller connection bool clear_expired = FALSE; int rc = pcmk_ok; int is_ocf_rc = 0; int option_index = 0; int timeout_ms = 0; int argerr = 0; int flag; int find_flags = 0; // Flags to use when searching for resource crm_exit_t exit_code = CRM_EX_OK; crm_log_cli_init("crm_resource"); pcmk__set_cli_options(NULL, "| [options]", long_options, "perform tasks related to Pacemaker " "cluster resources"); validate_options = crm_str_table_new(); while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, &longname); if (flag == -1) break; switch (flag) { case 0: /* long options with no short equivalent */ if (safe_str_eq("master", longname)) { scope_master = TRUE; } else if(safe_str_eq(longname, "recursive")) { recursive = TRUE; } else if (safe_str_eq("wait", longname)) { rsc_cmd = flag; rsc_long_cmd = longname; require_resource = FALSE; require_dataset = FALSE; } else if ( safe_str_eq("validate", longname) || safe_str_eq("restart", longname) || safe_str_eq("force-demote", longname) || safe_str_eq("force-stop", longname) || safe_str_eq("force-start", longname) || safe_str_eq("force-promote", longname) || safe_str_eq("force-check", longname)) { rsc_cmd = flag; rsc_long_cmd = longname; find_flags = pe_find_renamed|pe_find_anon; crm_log_args(argc, argv); } else if (safe_str_eq("list-ocf-providers", longname) || safe_str_eq("list-ocf-alternatives", longname) || safe_str_eq("list-standards", longname)) { const char *text = NULL; lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); if (safe_str_eq("list-ocf-providers", longname) || safe_str_eq("list-ocf-alternatives", longname)) { rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list); text = "OCF providers"; } else if (safe_str_eq("list-standards", longname)) { rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); text = "standards"; } if (rc > 0) { for (iter = list; iter != NULL; iter = iter->next) { printf("%s\n", iter->val); } lrmd_list_freeall(list); } else if (optarg) { fprintf(stderr, "No %s found for %s\n", text, optarg); exit_code = CRM_EX_NOSUCH; } else { fprintf(stderr, "No %s found\n", text); exit_code = CRM_EX_NOSUCH; } lrmd_api_delete(lrmd_conn); - bye(exit_code); + return bye(exit_code); } else if (safe_str_eq("show-metadata", longname)) { char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); rc = crm_parse_agent_spec(optarg, &standard, &provider, &type); if (rc == pcmk_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, 
provider, type, &metadata, 0); } else { fprintf(stderr, "'%s' is not a valid agent specification\n", optarg); rc = -ENXIO; } if (metadata) { printf("%s\n", metadata); } else { fprintf(stderr, "Metadata query for %s failed: %s\n", optarg, pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); } lrmd_api_delete(lrmd_conn); - bye(exit_code); + return bye(exit_code); } else if (safe_str_eq("list-agents", longname)) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; char *provider = strchr (optarg, ':'); lrmd_t *lrmd_conn = lrmd_api_new(); if (provider) { *provider++ = 0; } rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider); if (rc > 0) { for (iter = list; iter != NULL; iter = iter->next) { printf("%s\n", iter->val); } lrmd_list_freeall(list); } else { fprintf(stderr, "No agents found for standard=%s, provider=%s\n", optarg, (provider? provider : "*")); exit_code = CRM_EX_NOSUCH; } lrmd_api_delete(lrmd_conn); - bye(exit_code); + return bye(exit_code); } else if (safe_str_eq("class", longname)) { if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) { if (BE_QUIET == FALSE) { fprintf(stdout, "Standard %s does not support parameters\n", optarg); } - bye(exit_code); + return bye(exit_code); } else { v_class = optarg; } validate_cmdline = TRUE; require_resource = FALSE; } else if (safe_str_eq("agent", longname)) { validate_cmdline = TRUE; require_resource = FALSE; v_agent = optarg; } else if (safe_str_eq("provider", longname)) { validate_cmdline = TRUE; require_resource = FALSE; v_provider = optarg; } else if (safe_str_eq("option", longname)) { crm_info("Scanning: --option %s", optarg); rc = pcmk_scan_nvpair(optarg, &name, &value); if (rc != 2) { fprintf(stderr, "Invalid option: --option %s: %s", optarg, pcmk_strerror(rc)); argerr++; } else { crm_info("Got: '%s'='%s'", name, value); } g_hash_table_replace(validate_options, name, value); } else { crm_err("Unhandled long option: %s", longname); } break; case 'V': resource_verbose++; crm_bump_log_level(argc, argv); break; case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; case 'x': xml_file = strdup(optarg); break; case 'Q': BE_QUIET = TRUE; break; case 'm': attr_set_type = XML_TAG_META_SETS; break; case 'z': attr_set_type = XML_TAG_UTILIZATION; break; case 'u': move_lifetime = strdup(optarg); break; case 'f': do_force = TRUE; crm_log_args(argc, argv); break; case 'i': prop_id = optarg; break; case 's': prop_set = optarg; break; case 'r': rsc_id = optarg; break; case 'v': prop_value = optarg; break; case 't': rsc_type = optarg; break; case 'T': timeout_ms = crm_get_msec(optarg); break; case 'e': clear_expired = TRUE; require_resource = FALSE; break; case 'C': case 'R': crm_log_args(argc, argv); require_resource = FALSE; if (cib_file == NULL) { require_crmd = TRUE; } rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'n': operation = optarg; break; case 'I': interval_spec = optarg; break; case 'D': require_dataset = FALSE; crm_log_args(argc, argv); rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'F': require_crmd = TRUE; crm_log_args(argc, argv); rsc_cmd = flag; break; case 'U': case 'B': case 'M': crm_log_args(argc, argv); rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'c': case 'L': case 'l': case 'O': case 'o': require_resource = FALSE; rsc_cmd = flag; break; case 'Y': require_resource = FALSE; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'q': case 'w': rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'W': case 
'A': case 'a': rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'S': require_dataset = FALSE; crm_log_args(argc, argv); prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'p': case 'd': crm_log_args(argc, argv); prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'G': case 'g': prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'H': case 'N': crm_trace("Option %c => %s", flag, optarg); host_uname = optarg; break; default: CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag); ++argerr; break; } } // Catch the case where the user didn't specify a command if (rsc_cmd == 'L') { require_resource = FALSE; } // --expired without --clear/-U doesn't make sense if (clear_expired == TRUE && rsc_cmd != 'U') { CMD_ERR("--expired requires --clear or -U"); argerr++; } if (optind < argc && argv[optind] != NULL && rsc_cmd == 0 && rsc_long_cmd) { override_params = crm_str_table_new(); while (optind < argc && argv[optind] != NULL) { char *name = calloc(1, strlen(argv[optind])); char *value = calloc(1, strlen(argv[optind])); int rc = sscanf(argv[optind], "%[^=]=%s", name, value); if(rc == 2) { g_hash_table_replace(override_params, name, value); } else { CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd); free(value); free(name); argerr++; } optind++; } } else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) { CMD_ERR("non-option ARGV-elements: "); while (optind < argc && argv[optind] != NULL) { CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]); optind++; argerr++; } } if (optind > argc) { ++argerr; } // Sanity check validating from command line parameters. If everything checks out, // go ahead and run the validation. This way we don't need a CIB connection. if (validate_cmdline == TRUE) { // -r cannot be used with any of --class, --agent, or --provider if (rsc_id != NULL) { CMD_ERR("--resource cannot be used with --class, --agent, and --provider"); argerr++; // If --class, --agent, or --provider are given, --validate must also be given. } else if (!safe_str_eq(rsc_long_cmd, "validate")) { CMD_ERR("--class, --agent, and --provider require --validate"); argerr++; // Not all of --class, --agent, and --provider need to be given. Not all // classes support the concept of a provider. Check that what we were given // is valid. } else if (crm_str_eq(v_class, "stonith", TRUE)) { if (v_provider != NULL) { CMD_ERR("stonith does not support providers"); argerr++; } else if (stonith_agent_exists(v_agent, 0) == FALSE) { CMD_ERR("%s is not a known stonith agent", v_agent ? v_agent : ""); argerr++; } } else if (resources_agent_exists(v_class, v_provider, v_agent) == FALSE) { CMD_ERR("%s:%s:%s is not a known resource", v_class ? v_class : "", v_provider ? v_provider : "", v_agent ? 
v_agent : ""); argerr++; } if (argerr == 0) { rc = cli_resource_execute_from_params("test", v_class, v_provider, v_agent, "validate-all", validate_options, override_params, timeout_ms); exit_code = crm_errno2exit(rc); - bye(exit_code); + return bye(exit_code); } } if (argerr) { CMD_ERR("Invalid option(s) supplied, use --help for valid usage"); - bye(CRM_EX_USAGE); + return bye(CRM_EX_USAGE); } if (do_force) { crm_debug("Forcing..."); cib_options |= cib_quorum_override; } if (require_resource && !rsc_id) { CMD_ERR("Must supply a resource id with -r"); rc = -ENXIO; goto bail; } if (find_flags && rsc_id) { require_dataset = TRUE; } - /* Establish a connection to the CIB manager */ + // Establish a connection to the CIB cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); if (rc != pcmk_ok) { - CMD_ERR("Error connecting to the CIB manager: %s", pcmk_strerror(rc)); + CMD_ERR("Could not connect to the CIB: %s", pcmk_strerror(rc)); goto bail; } /* Populate working set from XML file if specified or CIB query otherwise */ if (require_dataset) { if (xml_file != NULL) { cib_xml_copy = filename2xml(xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); } if(rc != pcmk_ok) { goto bail; } /* Populate the working set instance */ data_set = pe_new_working_set(); if (data_set == NULL) { rc = -ENOMEM; goto bail; } set_bit(data_set->flags, pe_flag_no_counts); set_bit(data_set->flags, pe_flag_no_compat); rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_ok) { goto bail; } cluster_status(data_set); } // If command requires that resource exist if specified, find it if (find_flags && rsc_id) { rsc = pe_find_resource_with_flags(data_set->resources, rsc_id, find_flags); if (rsc == NULL) { CMD_ERR("Resource '%s' not found", rsc_id); rc = -ENXIO; goto bail; } } // Establish a connection to the controller if needed if (require_crmd) { char *client_uuid; pcmk_controld_api_cb_t dispatch_cb = { handle_controller_reply, NULL }; pcmk_controld_api_cb_t destroy_cb = { handle_controller_drop, NULL }; client_uuid = pcmk__getpid_s(); controld_api = pcmk_new_controld_api(crm_system_name, client_uuid); free(client_uuid); rc = controld_api->connect(controld_api, true, &dispatch_cb, &destroy_cb); if (rc != pcmk_rc_ok) { CMD_ERR("Error connecting to the controller: %s", pcmk_rc_str(rc)); rc = pcmk_rc2legacy(rc); goto bail; } } /* Handle rsc_cmd appropriately */ if (rsc_cmd == 'L') { rc = pcmk_ok; cli_resource_print_list(data_set, FALSE); } else if (rsc_cmd == 'l') { int found = 0; GListPtr lpc = NULL; rc = pcmk_ok; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { rsc = (pe_resource_t *) lpc->data; found++; cli_resource_print_raw(rsc); } if (found == 0) { printf("NO resources configured\n"); rc = -ENXIO; } } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) { /* We don't pass data_set because rsc needs to stay valid for the entire * lifetime of cli_resource_restart(), but it will reset and update the * working set multiple times, so it needs to use its own copy. 
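 *
 * A minimal sketch of the hazard this avoids (the resource name below is
 * hypothetical, not part of this patch):
 *
 *   pe_resource_t *found = pe_find_resource(data_set->resources, "myResource");
 *   pe_reset_working_set(data_set);   // frees the resource objects owned by data_set
 *   // "found" would now dangle
 *
 * which is why cli_resource_restart() queries and unpacks its own working set
 * copies instead of resetting the one that owns rsc.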
*/ rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn); } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) { rc = wait_till_stable(timeout_ms, cib_conn); } else if (rsc_cmd == 0 && rsc_long_cmd) { // validate, force-(stop|start|demote|promote|check) rc = cli_resource_execute(rsc, rsc_id, rsc_long_cmd, override_params, timeout_ms, cib_conn, data_set); if (rc >= 0) { is_ocf_rc = 1; } } else if (rsc_cmd == 'A' || rsc_cmd == 'a') { GListPtr lpc = NULL; xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); unpack_constraints(cib_constraints, data_set); // Constraints apply to group/clone, not member/instance rsc = uber_parent(rsc); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { pe_resource_t *r = (pe_resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1); fprintf(stdout, "* %s\n", rsc->id); cli_resource_print_location(rsc, NULL); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { pe_resource_t *r = (pe_resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1); } else if (rsc_cmd == 'c') { GListPtr lpc = NULL; rc = pcmk_ok; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { rsc = (pe_resource_t *) lpc->data; cli_resource_print_cts(rsc); } cli_resource_print_cts_constraints(data_set); } else if (rsc_cmd == 'F') { rc = cli_resource_fail(controld_api, host_uname, rsc_id, data_set); if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } rc = pcmk_rc2legacy(rc); } else if (rsc_cmd == 'O') { rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, data_set); } else if (rsc_cmd == 'o') { rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, data_set); } else if (rsc_cmd == 'W') { rc = cli_resource_search(rsc, rsc_id, data_set); if (rc >= 0) { rc = pcmk_ok; } } else if (rsc_cmd == 'q') { rc = cli_resource_print(rsc, data_set, TRUE); } else if (rsc_cmd == 'w') { rc = cli_resource_print(rsc, data_set, FALSE); } else if (rsc_cmd == 'Y') { pe_node_t *dest = NULL; if (host_uname) { dest = pe_find_node(data_set->nodes, host_uname); if (dest == NULL) { rc = -pcmk_err_node_unknown; goto bail; } } cli_resource_why(cib_conn, data_set->resources, rsc, dest); rc = pcmk_ok; } else if (rsc_cmd == 'U') { GListPtr before = NULL; GListPtr after = NULL; GListPtr remaining = NULL; GListPtr ele = NULL; pe_node_t *dest = NULL; if (BE_QUIET == FALSE) { before = build_constraint_list(data_set->input); } if (clear_expired == TRUE) { rc = cli_resource_clear_all_expired(data_set->input, cib_conn, rsc_id, host_uname, scope_master); } else if (host_uname) { dest = pe_find_node(data_set->nodes, host_uname); if (dest == NULL) { rc = -pcmk_err_node_unknown; if (BE_QUIET == FALSE) { g_list_free(before); } goto bail; } rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn, TRUE); } else { rc = cli_resource_clear(rsc_id, NULL, data_set->nodes, cib_conn, TRUE); } if (BE_QUIET == FALSE) { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { CMD_ERR("Could not get modified CIB: %s\n", pcmk_strerror(rc)); g_list_free(before); goto bail; } data_set->input = cib_xml_copy; cluster_status(data_set); after = build_constraint_list(data_set->input); remaining = subtract_lists(before, after, (GCompareFunc) strcmp); for (ele = remaining; ele != NULL; ele = ele->next) { printf("Removing constraint: %s\n", (char 
*) ele->data); } g_list_free(before); g_list_free(after); g_list_free(remaining); } } else if (rsc_cmd == 'M' && host_uname) { rc = cli_resource_move(rsc, rsc_id, host_uname, cib_conn, data_set); } else if (rsc_cmd == 'B' && host_uname) { pe_node_t *dest = pe_find_node(data_set->nodes, host_uname); if (dest == NULL) { rc = -pcmk_err_node_unknown; goto bail; } rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn); } else if (rsc_cmd == 'B' || rsc_cmd == 'M') { pe_node_t *current = NULL; unsigned int nactive = 0; current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn); } else if (is_set(rsc->flags, pe_rsc_promotable)) { int count = 0; GListPtr iter = NULL; current = NULL; for(iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = (pe_resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { count++; current = pe__current_node(child); } } if(count == 1 && current) { rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn); } else { rc = -EINVAL; exit_code = CRM_EX_USAGE; CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).", rsc_id, nactive, count); CMD_ERR("To prevent '%s' from running on a specific location, " "specify a node.", rsc_id); CMD_ERR("To prevent '%s' from being promoted at a specific " "location, specify a node and the master option.", rsc_id); } } else { rc = -EINVAL; exit_code = CRM_EX_USAGE; CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, nactive); CMD_ERR("To prevent '%s' from running on a specific location, " "specify a node.", rsc_id); } } else if (rsc_cmd == 'G') { rc = cli_resource_print_property(rsc, prop_name, data_set); } else if (rsc_cmd == 'S') { xmlNode *msg_data = NULL; if ((rsc_type == NULL) || !strlen(rsc_type)) { CMD_ERR("Must specify -t with resource type"); rc = -ENXIO; goto bail; } else if ((prop_value == NULL) || !strlen(prop_value)) { CMD_ERR("Must supply -v with new value"); rc = -EINVAL; goto bail; } CRM_LOG_ASSERT(prop_name != NULL); msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); crm_xml_add(msg_data, prop_name, prop_value); rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else if (rsc_cmd == 'g') { rc = cli_resource_print_attribute(rsc, prop_name, data_set); } else if (rsc_cmd == 'p') { if (prop_value == NULL || strlen(prop_value) == 0) { CMD_ERR("You need to supply a value with the -v option"); rc = -EINVAL; goto bail; } /* coverity[var_deref_model] False positive */ rc = cli_resource_update_attribute(rsc, rsc_id, prop_set, prop_id, prop_name, prop_value, recursive, cib_conn, data_set); } else if (rsc_cmd == 'd') { /* coverity[var_deref_model] False positive */ rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id, prop_name, cib_conn, data_set); } else if ((rsc_cmd == 'C') && rsc) { if (do_force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, rsc_id, (host_uname? 
host_uname: "all nodes")); rc = cli_resource_delete(controld_api, host_uname, rsc, operation, interval_spec, TRUE, data_set); if ((rc == pcmk_ok) && !BE_QUIET) { // Show any reasons why resource might stay stopped cli_resource_check(cib_conn, rsc); } if (rc == pcmk_ok) { start_mainloop(controld_api); } } else if (rsc_cmd == 'C') { rc = cli_cleanup_all(controld_api, host_uname, operation, interval_spec, data_set); if (rc == pcmk_ok) { start_mainloop(controld_api); } } else if ((rsc_cmd == 'R') && rsc) { if (do_force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, rsc_id, (host_uname? host_uname: "all nodes")); rc = cli_resource_delete(controld_api, host_uname, rsc, NULL, 0, FALSE, data_set); if ((rc == pcmk_ok) && !BE_QUIET) { // Show any reasons why resource might stay stopped cli_resource_check(cib_conn, rsc); } if (rc == pcmk_ok) { start_mainloop(controld_api); } } else if (rsc_cmd == 'R') { const char *router_node = host_uname; int attr_options = pcmk__node_attr_none; if (host_uname) { pe_node_t *node = pe_find_node(data_set->nodes, host_uname); if (pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", host_uname); rc = -ENXIO; goto bail; } router_node = node->details->uname; attr_options |= pcmk__node_attr_remote; } } if (controld_api == NULL) { printf("Dry run: skipping clean-up of %s due to CIB_file\n", host_uname? host_uname : "all nodes"); rc = pcmk_ok; goto bail; } crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes"); rc = pcmk_rc2legacy(pcmk__node_attr_request_clear(NULL, host_uname, NULL, NULL, NULL, NULL, attr_options)); if (controld_api->reprobe(controld_api, host_uname, router_node) == pcmk_rc_ok) { start_mainloop(controld_api); } } else if (rsc_cmd == 'D') { xmlNode *msg_data = NULL; if (rsc_type == NULL) { CMD_ERR("You need to specify a resource type with -t"); rc = -ENXIO; goto bail; } msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else { CMD_ERR("Unknown command: %c", rsc_cmd); } bail: if (is_ocf_rc) { exit_code = rc; } else if (rc != pcmk_ok) { CMD_ERR("Error performing operation: %s", pcmk_strerror(rc)); if (rc == -pcmk_err_no_quorum) { CMD_ERR("To ignore quorum, use the force option"); } if (exit_code == CRM_EX_OK) { exit_code = crm_errno2exit(rc); } } - bye(exit_code); + return bye(exit_code); } diff --git a/tools/crm_rule.c b/tools/crm_rule.c index b5e4b5ba46..116c86cdc2 100644 --- a/tools/crm_rule.c +++ b/tools/crm_rule.c @@ -1,362 +1,362 @@ /* * Copyright 2019-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #define SUMMARY "evaluate rules from the Pacemaker configuration" enum crm_rule_mode { crm_rule_mode_none, crm_rule_mode_check }; struct { char *date; char *input_xml; enum crm_rule_mode mode; char *rule; } options = { .mode = crm_rule_mode_none }; static int crm_rule_check(pe_working_set_t *data_set, const char *rule_id, crm_time_t *effective_date); static gboolean mode_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); static GOptionEntry mode_entries[] = { { "check", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, mode_cb, "Check whether a rule is in effect", NULL }, { NULL } }; static GOptionEntry data_entries[] = { { "xml-text", 'X', 0, G_OPTION_ARG_STRING, &options.input_xml, "Use argument for XML (or stdin if '-')", NULL }, { NULL } }; static GOptionEntry addl_entries[] = { { "date", 'd', 0, G_OPTION_ARG_STRING, &options.date, "Whether the rule is in effect on a given date", NULL }, { "rule", 'r', 0, G_OPTION_ARG_STRING, &options.rule, "The ID of the rule to check", NULL }, { NULL } }; static gboolean mode_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (strcmp(option_name, "c")) { options.mode = crm_rule_mode_check; } return TRUE; } static int crm_rule_check(pe_working_set_t *data_set, const char *rule_id, crm_time_t *effective_date) { xmlNode *cib_constraints = NULL; xmlNode *match = NULL; xmlXPathObjectPtr xpathObj = NULL; char *xpath = NULL; int rc = pcmk_rc_ok; int max = 0; /* Rules are under the constraints node in the XML, so first find that. */ cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); /* Get all rules matching the given ID which are also simple enough for us to check. * For the moment, these rules must only have a single date_expression child and: * - Do not have a date_spec operation, or * - Have a date_spec operation that contains years= but does not contain moon=. * * We do this in steps to provide better error messages. First, check that there's * any rule with the given ID. */ xpath = crm_strdup_printf("//rule[@id='%s']", rule_id); xpathObj = xpath_search(cib_constraints, xpath); max = numXpathResults(xpathObj); if (max == 0) { CMD_ERR("No rule found with ID=%s", rule_id); rc = ENXIO; goto bail; } else if (max > 1) { CMD_ERR("More than one rule with ID=%s found", rule_id); rc = ENXIO; goto bail; } free(xpath); freeXpathObject(xpathObj); /* Next, make sure it has exactly one date_expression. */ xpath = crm_strdup_printf("//rule[@id='%s']//date_expression", rule_id); xpathObj = xpath_search(cib_constraints, xpath); max = numXpathResults(xpathObj); if (max != 1) { CMD_ERR("Can't check rule %s because it does not have exactly one date_expression", rule_id); rc = EOPNOTSUPP; goto bail; } free(xpath); freeXpathObject(xpathObj); /* Then, check that it's something we actually support. 
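 *
 * As an illustrative sketch of what "supported" means here (the IDs and dates
 * are hypothetical, not taken from any real configuration), a rule this check
 * can evaluate lives under the constraints section and looks like:
 *
 *   <rule id="rule-until-2020" score="-INFINITY">
 *     <date_expression id="rule-until-2020-expr" operation="lt" end="2020-01-01"/>
 *   </rule>
 *
 * or, in the supported date_spec form:
 *
 *   <rule id="rule-2019-only" score="-INFINITY">
 *     <date_expression id="rule-2019-only-expr" operation="date_spec">
 *       <date_spec id="rule-2019-only-spec" years="2019"/>
 *     </date_expression>
 *   </rule>
 *
 * A date_spec that uses moon= is rejected by the xpath queries below.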
*/ xpath = crm_strdup_printf("//rule[@id='%s']//date_expression[@operation!='date_spec']", rule_id); xpathObj = xpath_search(cib_constraints, xpath); max = numXpathResults(xpathObj); if (max == 0) { free(xpath); freeXpathObject(xpathObj); xpath = crm_strdup_printf("//rule[@id='%s']//date_expression[@operation='date_spec' and date_spec/@years and not(date_spec/@moon)]", rule_id); xpathObj = xpath_search(cib_constraints, xpath); max = numXpathResults(xpathObj); if (max == 0) { CMD_ERR("Rule either must not use date_spec, or use date_spec with years= but not moon="); rc = ENXIO; goto bail; } } match = getXpathResult(xpathObj, 0); /* We should have ensured both of these pass with the xpath query above, but * double checking can't hurt. */ CRM_ASSERT(match != NULL); CRM_ASSERT(find_expression_type(match) == time_expr); rc = pe_eval_date_expression(match, effective_date, NULL); if (rc == pcmk_rc_within_range) { printf("Rule %s is still in effect\n", rule_id); rc = pcmk_rc_ok; } else if (rc == pcmk_rc_ok) { printf("Rule %s satisfies conditions\n", rule_id); } else if (rc == pcmk_rc_after_range) { printf("Rule %s is expired\n", rule_id); } else if (rc == pcmk_rc_before_range) { printf("Rule %s has not yet taken effect\n", rule_id); } else if (rc == pcmk_rc_op_unsatisfied) { printf("Rule %s does not satisfy conditions\n", rule_id); } else { printf("Could not determine whether rule %s is expired\n", rule_id); } bail: free(xpath); freeXpathObject(xpathObj); return rc; } static GOptionContext * build_arg_context(pcmk__common_args_t *args) { GOptionContext *context = NULL; const char *description = "This tool is currently experimental.\n" "The interface, behavior, and output may change with any version of pacemaker."; context = pcmk__build_arg_context(args, NULL, NULL); g_option_context_set_description(context, description); pcmk__add_arg_group(context, "modes", "Modes (mutually exclusive):", "Show modes of operation", mode_entries); pcmk__add_arg_group(context, "data", "Data:", "Show data options", data_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { cib_t *cib_conn = NULL; pe_working_set_t *data_set = NULL; crm_time_t *rule_date = NULL; xmlNode *input = NULL; int rc = pcmk_ok; crm_exit_t exit_code = CRM_EX_OK; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); GError *error = NULL; GOptionContext *context = NULL; gchar **processed_args = NULL; context = build_arg_context(args); crm_log_cli_init("crm_rule"); processed_args = pcmk__cmdline_preproc(argv, "nopNO"); if (!g_option_context_parse_strv(context, &processed_args, &error)) { CMD_ERR("%s: %s\n", g_get_prgname(), error->message); exit_code = CRM_EX_USAGE; goto bail; } for (int i = 0; i < args->verbosity; i++) { crm_bump_log_level(argc, argv); } if (args->version) { /* FIXME: When crm_rule is converted to use formatted output, this can go. */ pcmk__cli_help('v', CRM_EX_USAGE); } if (optind > argc) { char *help = g_option_context_get_help(context, TRUE, NULL); CMD_ERR("%s", help); free(help); exit_code = CRM_EX_USAGE; goto bail; } /* Check command line arguments before opening a connection to * the CIB manager or doing anything else important. 
*/ switch(options.mode) { case crm_rule_mode_check: if (options.rule == NULL) { CMD_ERR("--check requires use of --rule="); exit_code = CRM_EX_USAGE; goto bail; } break; default: CMD_ERR("No mode operation given"); exit_code = CRM_EX_USAGE; goto bail; break; } /* Set up some defaults. */ rule_date = crm_time_new(options.date); if (rule_date == NULL) { CMD_ERR("No --date given and can't determine current date"); exit_code = CRM_EX_DATAERR; goto bail; } /* Where does the XML come from? If one of various command line options were * given, use those. Otherwise, connect to the CIB and use that. */ if (safe_str_eq(options.input_xml, "-")) { input = stdin2xml(); if (input == NULL) { CMD_ERR("Couldn't parse input from STDIN\n"); exit_code = CRM_EX_DATAERR; goto bail; } } else if (options.input_xml != NULL) { input = string2xml(options.input_xml); if (input == NULL) { CMD_ERR("Couldn't parse input string: %s\n", options.input_xml); exit_code = CRM_EX_DATAERR; goto bail; } } else { - /* Establish a connection to the CIB manager */ + // Establish a connection to the CIB cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); if (rc != pcmk_ok) { - CMD_ERR("Error connecting to the CIB manager: %s", pcmk_strerror(rc)); + CMD_ERR("Could not connect to CIB: %s", pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto bail; } } /* Populate working set from CIB query */ if (input == NULL) { rc = cib_conn->cmds->query(cib_conn, NULL, &input, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { exit_code = crm_errno2exit(rc); goto bail; } } /* Populate the working set instance */ data_set = pe_new_working_set(); if (data_set == NULL) { exit_code = crm_errno2exit(ENOMEM); goto bail; } set_bit(data_set->flags, pe_flag_no_counts); set_bit(data_set->flags, pe_flag_no_compat); data_set->input = input; data_set->now = rule_date; /* Unpack everything. */ cluster_status(data_set); /* Now do whichever operation mode was asked for. There's only one at the * moment so this looks a little silly, but I expect there will be more * modes in the future. */ switch(options.mode) { case crm_rule_mode_check: rc = crm_rule_check(data_set, options.rule, rule_date); if (rc > 0) { CMD_ERR("Error checking rule: %s", pcmk_rc_str(rc)); } exit_code = pcmk_rc2exitc(rc); break; default: break; } bail: if (cib_conn != NULL) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } g_strfreev(processed_args); g_clear_error(&error); pcmk__free_arg_context(context); pe_free_working_set(data_set); crm_exit(exit_code); } diff --git a/tools/crm_shadow.c b/tools/crm_shadow.c index 41c368aecf..c01130e4a4 100644 --- a/tools/crm_shadow.c +++ b/tools/crm_shadow.c @@ -1,555 +1,556 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int command_options = cib_sync_call; static cib_t *real_cib = NULL; static int force_flag = 0; static int batch_flag = 0; static char * get_shadow_prompt(const char *name) { return crm_strdup_printf("shadow[%.40s] # ", name); } static void shadow_setup(char *name, gboolean do_switch) { const char *prompt = getenv("PS1"); const char *shell = getenv("SHELL"); char *new_prompt = get_shadow_prompt(name); printf("Setting up shadow instance\n"); if (safe_str_eq(new_prompt, prompt)) { /* nothing to do */ goto done; } else if (batch_flag == FALSE && shell != NULL) { setenv("PS1", new_prompt, 1); setenv("CIB_shadow", name, 1); printf("Type Ctrl-D to exit the crm_shadow shell\n"); if (strstr(shell, "bash")) { execl(shell, shell, "--norc", "--noprofile", NULL); } else { execl(shell, shell, NULL); } } else if (do_switch) { printf("To switch to the named shadow instance, paste the following into your shell:\n"); } else { printf ("A new shadow instance was created. To begin using it paste the following into your shell:\n"); } printf(" CIB_shadow=%s ; export CIB_shadow\n", name); done: free(new_prompt); } static void shadow_teardown(char *name) { const char *prompt = getenv("PS1"); char *our_prompt = get_shadow_prompt(name); if (prompt != NULL && strstr(prompt, our_prompt)) { printf("Now type Ctrl-D to exit the crm_shadow shell\n"); } else { printf ("Please remember to unset the CIB_shadow variable by pasting the following into your shell:\n"); printf(" unset CIB_shadow\n"); } free(our_prompt); } static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\t\tThis text", pcmk__option_default }, { "version", no_argument, NULL, '$', "\t\tVersion information", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\t\tIncrease debug output", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nQueries:", pcmk__option_default }, { "which", no_argument, NULL, 'w', "\t\tIndicate the active shadow copy", pcmk__option_default }, { "display", no_argument, NULL, 'p', "\t\tDisplay the contents of the active shadow copy", pcmk__option_default }, { "edit", no_argument, NULL, 'E', "\t\tEdit the contents of the active shadow copy with your " "favorite $EDITOR", pcmk__option_default }, { "diff", no_argument, NULL, 'd', "\t\tDisplay the changes in the active shadow copy\n", pcmk__option_default }, { "file", no_argument, NULL, 'F', "\t\tDisplay the location of the active shadow copy file\n", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nCommands:", pcmk__option_default }, { "create", required_argument, NULL, 'c', "\tCreate the named shadow copy of the active cluster configuration", pcmk__option_default }, { "create-empty", required_argument, NULL, 'e', "Create the named shadow copy with an empty cluster configuration. 
" "Optional: --validate-with", pcmk__option_default }, { "commit", required_argument, NULL, 'C', "\tUpload the contents of the named shadow copy to the cluster", pcmk__option_default }, { "delete", required_argument, NULL, 'D', "\tDelete the contents of the named shadow copy", pcmk__option_default }, { "reset", required_argument, NULL, 'r', "\tRecreate named shadow copy from the active cluster configuration", pcmk__option_default }, { "switch", required_argument, NULL, 's', "\t(Advanced) Switch to the named shadow copy", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:", pcmk__option_default }, { "force", no_argument, NULL, 'f', "\t\t(Advanced) Force the action to be performed", pcmk__option_default }, { "batch", no_argument, NULL, 'b', "\t\t(Advanced) Don't spawn a new shell", pcmk__option_default }, { "all", no_argument, NULL, 'a', "\t\t(Advanced) Upload entire CIB, including status, with --commit", pcmk__option_default }, { "validate-with", required_argument, NULL, 'v', "(Advanced) Create an older configuration version", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nExamples:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', "Create a blank shadow configuration:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_shadow --create-empty myShadow", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Create a shadow configuration from the running cluster:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_shadow --create myShadow", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Display the current shadow configuration:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_shadow --display", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Discard the current shadow configuration (named myShadow):", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_shadow --delete myShadow --force", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Upload current shadow configuration (named myShadow) " "to running cluster:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_shadow --commit myShadow", pcmk__option_example }, { 0, 0, 0, 0 } }; int main(int argc, char **argv) { int rc = pcmk_ok; int flag; int argerr = 0; crm_exit_t exit_code = CRM_EX_OK; static int command = '?'; const char *validation = NULL; char *shadow = NULL; char *shadow_file = NULL; gboolean full_upload = FALSE; gboolean dangerous_cmd = FALSE; struct stat buf; int option_index = 0; crm_log_cli_init("crm_shadow"); pcmk__set_cli_options(NULL, "| [options]", long_options, "perform Pacemaker configuration changes in a sandbox" "\n\nThis command sets up an environment in which " "configuration tools (cibadmin,\ncrm_resource, " "etc.) 
work offline instead of against a live " "cluster, allowing\nchanges to be previewed and " "tested for side-effects.\n"); if (argc < 2) { pcmk__cli_help('?', CRM_EX_USAGE); } while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); if (flag == -1 || flag == 0) break; switch (flag) { case 'a': full_upload = TRUE; break; case 'd': case 'E': case 'p': case 'w': case 'F': command = flag; free(shadow); shadow = NULL; { const char *env = getenv("CIB_shadow"); if(env) { shadow = strdup(env); } else { fprintf(stderr, "No active shadow configuration defined\n"); crm_exit(CRM_EX_NOSUCH); } } break; case 'v': validation = optarg; break; case 'e': case 'c': case 's': case 'r': command = flag; free(shadow); shadow = strdup(optarg); break; case 'C': case 'D': command = flag; dangerous_cmd = TRUE; free(shadow); shadow = strdup(optarg); break; case 'V': command_options = command_options | cib_verbose; crm_bump_log_level(argc, argv); break; case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; case 'f': command_options |= cib_quorum_override; force_flag = 1; break; case 'b': batch_flag = 1; break; default: printf("Argument code 0%o (%c)" " is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); pcmk__cli_help('?', CRM_EX_USAGE); } if (optind > argc) { ++argerr; } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } if (command == 'w') { /* which shadow instance is active? */ const char *local = getenv("CIB_shadow"); if (local == NULL) { fprintf(stderr, "No shadow instance provided\n"); exit_code = CRM_EX_NOSUCH; } else { fprintf(stdout, "%s\n", local); } goto done; } if (shadow == NULL) { fprintf(stderr, "No shadow instance provided\n"); fflush(stderr); exit_code = CRM_EX_NOSUCH; goto done; } else if (command != 's' && command != 'c') { const char *local = getenv("CIB_shadow"); if (local != NULL && safe_str_neq(local, shadow) && force_flag == FALSE) { fprintf(stderr, "The supplied shadow instance (%s) is not the same as the active one (%s).\n" " To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n", shadow, local); fflush(stderr); exit_code = CRM_EX_USAGE; goto done; } } if (dangerous_cmd && force_flag == FALSE) { fprintf(stderr, "The supplied command is considered dangerous." 
" To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n"); fflush(stderr); exit_code = CRM_EX_USAGE; goto done; } shadow_file = get_shadow_file(shadow); if (command == 'D') { /* delete the file */ if ((unlink(shadow_file) < 0) && (errno != ENOENT)) { exit_code = crm_errno2exit(errno); fprintf(stderr, "Could not remove shadow instance '%s': %s\n", shadow, strerror(errno)); } shadow_teardown(shadow); goto done; } else if (command == 'F') { printf("%s\n", shadow_file); goto done; } if (command == 'd' || command == 'r' || command == 'c' || command == 'C') { real_cib = cib_new_no_shadow(); rc = real_cib->cmds->signon(real_cib, crm_system_name, cib_command); if (rc != pcmk_ok) { - fprintf(stderr, "Signon to CIB failed: %s\n", pcmk_strerror(rc)); + fprintf(stderr, "Could not connect to CIB: %s\n", + pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto done; } } // File existence check rc = stat(shadow_file, &buf); if (command == 'e' || command == 'c') { if (rc == 0 && force_flag == FALSE) { fprintf(stderr, "A shadow instance '%s' already exists.\n" " To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n", shadow); exit_code = CRM_EX_CANTCREAT; goto done; } } else if (rc < 0) { fprintf(stderr, "Could not access shadow instance '%s': %s\n", shadow, strerror(errno)); exit_code = CRM_EX_NOSUCH; goto done; } if (command == 'c' || command == 'e' || command == 'r') { xmlNode *output = NULL; /* create a shadow instance based on the current cluster config */ if (command == 'c' || command == 'r') { rc = real_cib->cmds->query(real_cib, NULL, &output, command_options); if (rc != pcmk_ok) { fprintf(stderr, "Could not connect to the CIB manager: %s\n", pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto done; } } else { output = createEmptyCib(0); if(validation) { crm_xml_add(output, XML_ATTR_VALIDATION, validation); } printf("Created new %s configuration\n", crm_element_value(output, XML_ATTR_VALIDATION)); } rc = write_xml_file(output, shadow_file, FALSE); free_xml(output); if (rc < 0) { fprintf(stderr, "Could not %s the shadow instance '%s': %s\n", command == 'r' ? 
"reset" : "create", shadow, pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto done; } shadow_setup(shadow, FALSE); } else if (command == 'E') { char *editor = getenv("EDITOR"); if (editor == NULL) { fprintf(stderr, "No value for EDITOR defined\n"); exit_code = CRM_EX_NOT_CONFIGURED; goto done; } execlp(editor, "--", shadow_file, NULL); fprintf(stderr, "Could not invoke EDITOR (%s %s): %s\n", editor, shadow_file, strerror(errno)); exit_code = CRM_EX_OSFILE; goto done; } else if (command == 's') { shadow_setup(shadow, TRUE); goto done; } else if (command == 'p') { /* display the current contents */ char *output_s = NULL; xmlNode *output = filename2xml(shadow_file); output_s = dump_xml_formatted(output); printf("%s", output_s); free(output_s); free_xml(output); } else if (command == 'd') { /* diff against cluster */ xmlNode *diff = NULL; xmlNode *old_config = NULL; xmlNode *new_config = filename2xml(shadow_file); rc = real_cib->cmds->query(real_cib, NULL, &old_config, command_options); if (rc != pcmk_ok) { fprintf(stderr, "Could not query the CIB: %s\n", pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto done; } xml_track_changes(new_config, NULL, new_config, FALSE); xml_calculate_changes(old_config, new_config); diff = xml_create_patchset(0, old_config, new_config, NULL, FALSE); xml_log_changes(LOG_INFO, __FUNCTION__, new_config); xml_accept_changes(new_config); if (diff != NULL) { xml_log_patchset(LOG_STDOUT, " ", diff); exit_code = CRM_EX_ERROR; } goto done; } else if (command == 'C') { /* commit to the cluster */ xmlNode *input = filename2xml(shadow_file); xmlNode *section_xml = input; const char *section = NULL; if (!full_upload) { section = XML_CIB_TAG_CONFIGURATION; section_xml = first_named_child(input, section); } rc = real_cib->cmds->replace(real_cib, section, section_xml, command_options); if (rc != pcmk_ok) { fprintf(stderr, "Could not commit shadow instance '%s' to the CIB: %s\n", shadow, pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); } shadow_teardown(shadow); free_xml(input); } done: free(shadow_file); free(shadow); crm_exit(exit_code); } diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c index cb216b0ede..60a2d70938 100644 --- a/tools/crm_simulate.c +++ b/tools/crm_simulate.c @@ -1,1109 +1,1109 @@ /* * Copyright 2009-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include cib_t *global_cib = NULL; GListPtr op_fail = NULL; bool action_numbers = FALSE; gboolean quiet = FALSE; gboolean print_pending = TRUE; char *temp_shadow = NULL; extern gboolean bringing_nodes_online; #define quiet_log(fmt, args...) 
do { \ if(quiet == FALSE) { \ printf(fmt , ##args); \ } \ } while(0) char *use_date = NULL; static void get_date(pe_working_set_t *data_set, bool print_original) { time_t original_date = 0; crm_element_value_epoch(data_set->input, "execution-date", &original_date); if (use_date) { data_set->now = crm_time_new(use_date); quiet_log(" + Setting effective cluster time: %s", use_date); crm_time_log(LOG_NOTICE, "Pretending 'now' is", data_set->now, crm_time_log_date | crm_time_log_timeofday); } else if (original_date) { data_set->now = crm_time_new(NULL); crm_time_set_timet(data_set->now, &original_date); if (print_original) { char *when = crm_time_as_string(data_set->now, crm_time_log_date|crm_time_log_timeofday); printf("Using the original execution date of: %s\n", when); free(when); } } } static void print_cluster_status(pe_working_set_t * data_set, long options) { char *online_nodes = NULL; char *online_remote_nodes = NULL; char *online_guest_nodes = NULL; char *offline_nodes = NULL; char *offline_remote_nodes = NULL; GListPtr gIter = NULL; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; const char *node_mode = NULL; char *node_name = NULL; if (pe__is_guest_node(node)) { node_name = crm_strdup_printf("%s:%s", node->details->uname, node->details->remote_rsc->container->id); } else { node_name = crm_strdup_printf("%s", node->details->uname); } if (node->details->unclean) { if (node->details->online && node->details->unclean) { node_mode = "UNCLEAN (online)"; } else if (node->details->pending) { node_mode = "UNCLEAN (pending)"; } else { node_mode = "UNCLEAN (offline)"; } } else if (node->details->pending) { node_mode = "pending"; } else if (node->details->standby_onfail && node->details->online) { node_mode = "standby (on-fail)"; } else if (node->details->standby) { if (node->details->online) { node_mode = "standby"; } else { node_mode = "OFFLINE (standby)"; } } else if (node->details->maintenance) { if (node->details->online) { node_mode = "maintenance"; } else { node_mode = "OFFLINE (maintenance)"; } } else if (node->details->online) { if (pe__is_guest_node(node)) { online_guest_nodes = pcmk__add_word(online_guest_nodes, node_name); } else if (pe__is_remote_node(node)) { online_remote_nodes = pcmk__add_word(online_remote_nodes, node_name); } else { online_nodes = pcmk__add_word(online_nodes, node_name); } free(node_name); continue; } else { if (pe__is_remote_node(node)) { offline_remote_nodes = pcmk__add_word(offline_remote_nodes, node_name); } else if (pe__is_guest_node(node)) { /* ignore offline container nodes */ } else { offline_nodes = pcmk__add_word(offline_nodes, node_name); } free(node_name); continue; } if (pe__is_guest_node(node)) { printf("GuestNode %s: %s\n", node_name, node_mode); } else if (pe__is_remote_node(node)) { printf("RemoteNode %s: %s\n", node_name, node_mode); } else if (safe_str_eq(node->details->uname, node->details->id)) { printf("Node %s: %s\n", node_name, node_mode); } else { printf("Node %s (%s): %s\n", node_name, node->details->id, node_mode); } free(node_name); } if (online_nodes) { printf("Online: [%s ]\n", online_nodes); free(online_nodes); } if (offline_nodes) { printf("OFFLINE: [%s ]\n", offline_nodes); free(offline_nodes); } if (online_remote_nodes) { printf("RemoteOnline: [%s ]\n", online_remote_nodes); free(online_remote_nodes); } if (offline_remote_nodes) { printf("RemoteOFFLINE: [%s ]\n", offline_remote_nodes); free(offline_remote_nodes); } if (online_guest_nodes) { printf("GuestOnline: 
[%s ]\n", online_guest_nodes); free(online_guest_nodes); } fprintf(stdout, "\n"); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; if (is_set(rsc->flags, pe_rsc_orphan) && rsc->role == RSC_ROLE_STOPPED) { continue; } rsc->fns->print(rsc, NULL, pe_print_printf | options, stdout); } fprintf(stdout, "\n"); } static char * create_action_name(pe_action_t *action) { char *action_name = NULL; const char *prefix = ""; const char *action_host = NULL; const char *clone_name = NULL; const char *task = action->task; if (action->node) { action_host = action->node->details->uname; } else if (is_not_set(action->flags, pe_action_pseudo)) { action_host = ""; } if (safe_str_eq(action->task, RSC_CANCEL)) { prefix = "Cancel "; task = action->cancel_task; } if (action->rsc && action->rsc->clone_name) { clone_name = action->rsc->clone_name; } if (clone_name) { char *key = NULL; guint interval_ms = 0; if (pcmk__guint_from_hash(action->meta, XML_LRM_ATTR_INTERVAL_MS, 0, &interval_ms) != pcmk_rc_ok) { interval_ms = 0; } if (safe_str_eq(action->task, RSC_NOTIFY) || safe_str_eq(action->task, RSC_NOTIFIED)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_key_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_key_operation"); CRM_ASSERT(n_type != NULL); CRM_ASSERT(n_task != NULL); key = pcmk__notify_key(clone_name, n_type, n_task); } else { key = pcmk__op_key(clone_name, task, interval_ms); } if (action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, key, action_host); } else { action_name = crm_strdup_printf("%s%s", prefix, key); } free(key); } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { const char *op = g_hash_table_lookup(action->meta, "stonith_action"); action_name = crm_strdup_printf("%s%s '%s' %s", prefix, action->task, op, action_host); } else if (action->rsc && action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, action->uuid, action_host); } else if (action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, action->task, action_host); } else { action_name = crm_strdup_printf("%s", action->uuid); } if (action_numbers) { // i.e. 
verbose char *with_id = crm_strdup_printf("%s (%d)", action_name, action->id); free(action_name); action_name = with_id; } return action_name; } static void create_dotfile(pe_working_set_t * data_set, const char *dot_file, gboolean all_actions) { GListPtr gIter = NULL; FILE *dot_strm = fopen(dot_file, "w"); if (dot_strm == NULL) { crm_perror(LOG_ERR, "Could not open %s for writing", dot_file); return; } fprintf(dot_strm, " digraph \"g\" {\n"); for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; const char *style = "dashed"; const char *font = "black"; const char *color = "black"; char *action_name = create_action_name(action); crm_trace("Action %d: %s %s %p", action->id, action_name, action->uuid, action); if (is_set(action->flags, pe_action_pseudo)) { font = "orange"; } if (is_set(action->flags, pe_action_dumped)) { style = "bold"; color = "green"; } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) { color = "red"; font = "purple"; if (all_actions == FALSE) { goto do_not_write; } } else if (is_set(action->flags, pe_action_optional)) { color = "blue"; if (all_actions == FALSE) { goto do_not_write; } } else { color = "red"; CRM_CHECK(is_set(action->flags, pe_action_runnable) == FALSE,; ); } set_bit(action->flags, pe_action_dumped); crm_trace("\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]", action_name, style, color, font); fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n", action_name, style, color, font); do_not_write: free(action_name); } for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; GListPtr gIter2 = NULL; for (gIter2 = action->actions_before; gIter2 != NULL; gIter2 = gIter2->next) { pe_action_wrapper_t *before = (pe_action_wrapper_t *) gIter2->data; char *before_name = NULL; char *after_name = NULL; const char *style = "dashed"; gboolean optional = TRUE; if (before->state == pe_link_dumped) { optional = FALSE; style = "bold"; } else if (is_set(action->flags, pe_action_pseudo) && (before->type & pe_order_stonith_stop)) { continue; } else if (before->type == pe_order_none) { continue; } else if (is_set(before->action->flags, pe_action_dumped) && is_set(action->flags, pe_action_dumped) && before->type != pe_order_load) { optional = FALSE; } if (all_actions || optional == FALSE) { before_name = create_action_name(before->action); after_name = create_action_name(action); crm_trace("\"%s\" -> \"%s\" [ style = %s]", before_name, after_name, style); fprintf(dot_strm, "\"%s\" -> \"%s\" [ style = %s]\n", before_name, after_name, style); free(before_name); free(after_name); } } } fprintf(dot_strm, "}\n"); fflush(dot_strm); fclose(dot_strm); } static void setup_input(const char *input, const char *output) { int rc = pcmk_ok; cib_t *cib_conn = NULL; xmlNode *cib_object = NULL; char *local_output = NULL; if (input == NULL) { /* Use live CIB */ cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); if (rc == pcmk_ok) { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_object, cib_scope_local | cib_sync_call); } cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); cib_conn = NULL; if (rc != pcmk_ok) { fprintf(stderr, "Live CIB query failed: %s (%d)\n", pcmk_strerror(rc), rc); crm_exit(crm_errno2exit(rc)); } else if (cib_object == NULL) { fprintf(stderr, "Live CIB query failed: empty result\n"); crm_exit(CRM_EX_NOINPUT); } } else if (safe_str_eq(input, "-")) { cib_object 
= filename2xml(NULL); } else { cib_object = filename2xml(input); } if (get_object_root(XML_CIB_TAG_STATUS, cib_object) == NULL) { create_xml_node(cib_object, XML_CIB_TAG_STATUS); } if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) { free_xml(cib_object); crm_exit(CRM_EX_CONFIG); } if (validate_xml(cib_object, NULL, FALSE) != TRUE) { free_xml(cib_object); crm_exit(CRM_EX_CONFIG); } if (output == NULL) { char *pid = pcmk__getpid_s(); local_output = get_shadow_file(pid); temp_shadow = strdup(local_output); output = local_output; free(pid); } rc = write_xml_file(cib_object, output, FALSE); free_xml(cib_object); cib_object = NULL; if (rc < 0) { fprintf(stderr, "Could not create '%s': %s\n", output, pcmk_strerror(rc)); crm_exit(CRM_EX_CANTCREAT); } setenv("CIB_file", output, 1); free(local_output); } static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\tThis text", pcmk__option_default }, { "version", no_argument, NULL, '$', "\tVersion information", pcmk__option_default }, { "quiet", no_argument, NULL, 'Q', "\tDisplay only essential output", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\tIncrease debug output", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nOperations:", pcmk__option_default }, { "run", no_argument, NULL, 'R', "\tDetermine cluster's response to the given configuration and status", pcmk__option_default }, { "simulate", no_argument, NULL, 'S', "Simulate transition's execution and display resulting cluster status", pcmk__option_default }, { "in-place", no_argument, NULL, 'X', "Simulate transition's execution and store result back to input file", pcmk__option_default }, { "show-scores", no_argument, NULL, 's', "Show allocation scores", pcmk__option_default }, { "show-utilization", no_argument, NULL, 'U', "Show utilization information", pcmk__option_default }, { "profile", required_argument, NULL, 'P', "Run all tests in the named directory to create profiling data", pcmk__option_default }, { "repeat", required_argument, NULL, 'N', "With --profile, repeat each test N times and print timings", pcmk__option_default }, { "pending", no_argument, NULL, 'j', "\tDisplay pending state if 'record-pending' is enabled", pcmk__option_hidden }, { "-spacer-", no_argument, NULL, '-', "\nSynthetic Cluster Events:", pcmk__option_default }, { "node-up", required_argument, NULL, 'u', "\tBring a node online", pcmk__option_default }, { "node-down", required_argument, NULL, 'd', "\tTake a node offline", pcmk__option_default }, { "node-fail", required_argument, NULL, 'f', "\tMark a node as failed", pcmk__option_default }, { "op-inject", required_argument, NULL, 'i', "\tGenerate a failure for the cluster to react to in the simulation", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\tValue is of the form " "${resource}_${task}_${interval_in_ms}@${node}=${rc}.", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\t(for example, memcached_monitor_20000@bart.example.com=7)", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\tFor more information on OCF return codes, refer to: " "https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/" "2.0/html/Pacemaker_Administration/s-ocf-return-codes.html", pcmk__option_default }, { "op-fail", required_argument, NULL, 'F', "\tIf the specified task occurs during the simulation, have it fail " "with return code ${rc}", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', 
"\t\tValue is of the form " "${resource}_${task}_${interval_in_ms}@${node}=${rc}.", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\t(for example, memcached_stop_0@bart.example.com=1)\n", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\t\tThe transition will normally stop at the failed action. Save " "the result with --save-output and re-run with --xml-file", pcmk__option_default }, { "set-datetime", required_argument, NULL, 't', "Set date/time (ISO 8601 format, see " "https://en.wikipedia.org/wiki/ISO_8601)", pcmk__option_default }, { "quorum", required_argument, NULL, 'q', "\tSpecify a value for quorum", pcmk__option_default }, { "watchdog", required_argument, NULL, 'w', "\tAssume a watchdog device is active", pcmk__option_default }, { "ticket-grant", required_argument, NULL, 'g', "Grant a ticket", pcmk__option_default }, { "ticket-revoke", required_argument, NULL, 'r', "Revoke a ticket", pcmk__option_default }, { "ticket-standby", required_argument, NULL, 'b', "Make a ticket standby", pcmk__option_default }, { "ticket-activate", required_argument, NULL, 'e', "Activate a ticket", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nOutput Options:", pcmk__option_default }, { "save-input", required_argument, NULL, 'I', "\tSave the input configuration to the named file", pcmk__option_default }, { "save-output", required_argument, NULL, 'O', "Save the output configuration to the named file", pcmk__option_default }, { "save-graph", required_argument, NULL, 'G', "\tSave the transition graph (XML format) to the named file", pcmk__option_default }, { "save-dotfile", required_argument, NULL, 'D', "Save the transition graph (DOT format) to the named file", pcmk__option_default }, { "all-actions", no_argument, NULL, 'a', "\tDisplay all possible actions in DOT graph (even if not part " "of transition)", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nData Source:", pcmk__option_default }, { "live-check", no_argument, NULL, 'L', "\tConnect to CIB mamager and use the current CIB contents as input", pcmk__option_default }, { "xml-file", required_argument, NULL, 'x', "\tRetrieve XML from the named file", pcmk__option_default }, { "xml-pipe", no_argument, NULL, 'p', "\tRetrieve XML from stdin", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nExamples:\n", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "Pretend a recurring monitor action found memcached stopped on node " "fred.example.com and, during recovery, that the memcached stop " "action failed", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_simulate -LS --op-inject " "memcached:0_monitor_20000@bart.example.com=7 " "--op-fail memcached:0_stop_0@fred.example.com=1 " "--save-output /tmp/memcached-test.xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Now see what the reaction to the stop failure would be", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_simulate -S --xml-file /tmp/memcached-test.xml", pcmk__option_example }, { 0, 0, 0, 0 } }; static void profile_one(const char *xml_file, long long repeat, pe_working_set_t *data_set) { xmlNode *cib_object = NULL; clock_t start = 0; printf("* Testing %s ...", xml_file); fflush(stdout); cib_object = filename2xml(xml_file); start = clock(); if (get_object_root(XML_CIB_TAG_STATUS, cib_object) == NULL) { create_xml_node(cib_object, XML_CIB_TAG_STATUS); } if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) { free_xml(cib_object); return; } if 
(validate_xml(cib_object, NULL, FALSE) != TRUE) { free_xml(cib_object); return; } for (int i = 0; i < repeat; ++i) { xmlNode *input = (repeat == 1)? cib_object : copy_xml(cib_object); data_set->input = input; get_date(data_set, false); pcmk__schedule_actions(data_set, input, NULL); pe_reset_working_set(data_set); } printf(" %.2f secs\n", (clock() - start) / (float) CLOCKS_PER_SEC); } #ifndef FILENAME_MAX # define FILENAME_MAX 512 #endif static void profile_all(const char *dir, long long repeat, pe_working_set_t *data_set) { struct dirent **namelist; int file_num = scandir(dir, &namelist, 0, alphasort); if (file_num > 0) { struct stat prop; char buffer[FILENAME_MAX]; while (file_num--) { if ('.' == namelist[file_num]->d_name[0]) { free(namelist[file_num]); continue; } else if (!pcmk__ends_with_ext(namelist[file_num]->d_name, ".xml")) { free(namelist[file_num]); continue; } snprintf(buffer, sizeof(buffer), "%s/%s", dir, namelist[file_num]->d_name); if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) { profile_one(buffer, repeat, data_set); } free(namelist[file_num]); } free(namelist); } } int main(int argc, char **argv) { int rc = pcmk_ok; guint modified = 0; gboolean store = FALSE; gboolean process = FALSE; gboolean simulate = FALSE; gboolean all_actions = FALSE; gboolean have_stdout = FALSE; pe_working_set_t *data_set = NULL; const char *xml_file = "-"; const char *quorum = NULL; const char *watchdog = NULL; const char *test_dir = NULL; const char *dot_file = NULL; const char *graph_file = NULL; const char *input_file = NULL; const char *output_file = NULL; const char *repeat_s = NULL; int flag = 0; int index = 0; int argerr = 0; long long repeat = 1; GListPtr node_up = NULL; GListPtr node_down = NULL; GListPtr node_fail = NULL; GListPtr op_inject = NULL; GListPtr ticket_grant = NULL; GListPtr ticket_revoke = NULL; GListPtr ticket_standby = NULL; GListPtr ticket_activate = NULL; xmlNode *input = NULL; crm_log_cli_init("crm_simulate"); pcmk__set_cli_options(NULL, " [options]", long_options, "simulate a Pacemaker cluster's response to events"); if (argc < 2) { pcmk__cli_help('?', CRM_EX_USAGE); } while (1) { flag = pcmk__next_cli_option(argc, argv, &index, NULL); if (flag == -1) break; switch (flag) { case 'V': if (have_stdout == FALSE) { /* Redirect stderr to stdout so we can grep the output */ have_stdout = TRUE; close(STDERR_FILENO); dup2(STDOUT_FILENO, STDERR_FILENO); } crm_bump_log_level(argc, argv); action_numbers = TRUE; break; case '?': case '$': pcmk__cli_help(flag, CRM_EX_OK); break; case 'p': xml_file = "-"; break; case 'Q': quiet = TRUE; break; case 'L': xml_file = NULL; break; case 'x': xml_file = optarg; break; case 'u': modified++; bringing_nodes_online = TRUE; node_up = g_list_append(node_up, optarg); break; case 'd': modified++; node_down = g_list_append(node_down, optarg); break; case 'f': modified++; node_fail = g_list_append(node_fail, optarg); break; case 't': use_date = strdup(optarg); break; case 'i': modified++; op_inject = g_list_append(op_inject, optarg); break; case 'F': process = TRUE; simulate = TRUE; op_fail = g_list_append(op_fail, optarg); break; case 'w': modified++; watchdog = optarg; break; case 'q': modified++; quorum = optarg; break; case 'g': modified++; ticket_grant = g_list_append(ticket_grant, optarg); break; case 'r': modified++; ticket_revoke = g_list_append(ticket_revoke, optarg); break; case 'b': modified++; ticket_standby = g_list_append(ticket_standby, optarg); break; case 'e': modified++; ticket_activate = g_list_append(ticket_activate, 
optarg); break; case 'a': all_actions = TRUE; break; case 's': process = TRUE; show_scores = TRUE; break; case 'U': process = TRUE; show_utilization = TRUE; break; case 'j': print_pending = TRUE; break; case 'S': process = TRUE; simulate = TRUE; break; case 'X': store = TRUE; process = TRUE; simulate = TRUE; break; case 'R': process = TRUE; break; case 'D': process = TRUE; dot_file = optarg; break; case 'G': process = TRUE; graph_file = optarg; break; case 'I': input_file = optarg; break; case 'O': output_file = optarg; break; case 'P': test_dir = optarg; break; case 'N': repeat_s = optarg; break; default: ++argerr; break; } } if (optind > argc) { ++argerr; } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } data_set = pe_new_working_set(); if (data_set == NULL) { crm_perror(LOG_ERR, "Could not allocate working set"); rc = -ENOMEM; goto done; } set_bit(data_set->flags, pe_flag_no_compat); if (test_dir != NULL) { if (repeat_s != NULL) { repeat = crm_parse_ll(repeat_s, NULL); if (errno || (repeat < 1)) { fprintf(stderr, "--repeat must be positive integer, not '%s' -- using 1", repeat_s); repeat = 1; } } profile_all(test_dir, repeat, data_set); return CRM_EX_OK; } setup_input(xml_file, store ? xml_file : output_file); global_cib = cib_new(); rc = global_cib->cmds->signon(global_cib, crm_system_name, cib_command); if (rc != pcmk_ok) { - fprintf(stderr, "Could not connect to the CIB manager: %s\n", + fprintf(stderr, "Could not connect to the CIB: %s\n", pcmk_strerror(rc)); goto done; } rc = global_cib->cmds->query(global_cib, NULL, &input, cib_sync_call | cib_scope_local); if (rc != pcmk_ok) { fprintf(stderr, "Could not get local CIB: %s\n", pcmk_strerror(rc)); goto done; } data_set->input = input; get_date(data_set, true); if(xml_file) { set_bit(data_set->flags, pe_flag_sanitized); } set_bit(data_set->flags, pe_flag_stdout); cluster_status(data_set); if (quiet == FALSE) { int options = print_pending ? 
pe_print_pending : 0; if (is_set(data_set->flags, pe_flag_maintenance_mode)) { quiet_log("\n *** Resource management is DISABLED ***"); quiet_log("\n The cluster will not attempt to start, stop or recover services"); quiet_log("\n"); } if (data_set->disabled_resources || data_set->blocked_resources) { quiet_log("%d of %d resource instances DISABLED and %d BLOCKED " "from further action due to failure\n", data_set->disabled_resources, data_set->ninstances, data_set->blocked_resources); } quiet_log("\nCurrent cluster status:\n"); print_cluster_status(data_set, options); } if (modified) { quiet_log("Performing requested modifications\n"); modify_configuration(data_set, global_cib, quorum, watchdog, node_up, node_down, node_fail, op_inject, ticket_grant, ticket_revoke, ticket_standby, ticket_activate); rc = global_cib->cmds->query(global_cib, NULL, &input, cib_sync_call); if (rc != pcmk_ok) { fprintf(stderr, "Could not get modified CIB: %s\n", pcmk_strerror(rc)); goto done; } cleanup_calculations(data_set); data_set->input = input; get_date(data_set, true); if(xml_file) { set_bit(data_set->flags, pe_flag_sanitized); } set_bit(data_set->flags, pe_flag_stdout); cluster_status(data_set); } if (input_file != NULL) { rc = write_xml_file(input, input_file, FALSE); if (rc < 0) { fprintf(stderr, "Could not create '%s': %s\n", input_file, pcmk_strerror(rc)); goto done; } } if (process || simulate) { crm_time_t *local_date = NULL; if (show_scores && show_utilization) { printf("Allocation scores and utilization information:\n"); } else if (show_scores) { fprintf(stdout, "Allocation scores:\n"); } else if (show_utilization) { printf("Utilization information:\n"); } pcmk__schedule_actions(data_set, input, local_date); input = NULL; /* Don't try and free it twice */ if (graph_file != NULL) { write_xml_file(data_set->graph, graph_file, FALSE); } if (dot_file != NULL) { create_dotfile(data_set, dot_file, all_actions); } if (quiet == FALSE) { GListPtr gIter = NULL; quiet_log("%sTransition Summary:\n", show_scores || show_utilization || modified ? "\n" : ""); fflush(stdout); LogNodeActions(data_set, TRUE); for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; LogActions(rsc, data_set, TRUE); } } } rc = pcmk_ok; if (simulate) { if (run_simulation(data_set, global_cib, op_fail, quiet) != pcmk_ok) { rc = pcmk_err_generic; } if(quiet == FALSE) { get_date(data_set, true); quiet_log("\nRevised cluster status:\n"); set_bit(data_set->flags, pe_flag_stdout); cluster_status(data_set); print_cluster_status(data_set, 0); } } done: pe_free_working_set(data_set); global_cib->cmds->signoff(global_cib); cib_delete(global_cib); free(use_date); fflush(stderr); if (temp_shadow) { unlink(temp_shadow); free(temp_shadow); } crm_exit(crm_errno2exit(rc)); } diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c index d10d498d67..703fdff6f7 100644 --- a/tools/crm_ticket.c +++ b/tools/crm_ticket.c @@ -1,1074 +1,1074 @@ /* * Copyright 2012-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
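crm_rule, crm_simulate, and crm_ticket in this diff all unpack the CIB the same way: allocate a working set, set a couple of scheduler flags, attach the input XML and an effective time, then call cluster_status(). The sketch below is illustrative only and not part of the patch; cib_xml stands in for whatever XML the caller already obtained, and the assumption is simply that the libpe_status development headers are available (the tools use the set_bit() helper for the flags, plain bitwise-or is used here to stay self-contained).

    /* Illustrative sketch, not part of the patch: the shared working-set
     * setup the patched tools perform before inspecting rules, tickets, or
     * simulated transitions. `cib_xml` is a hypothetical input document. */
    #include <errno.h>
    #include <crm/crm.h>
    #include <crm/pengine/status.h>

    static int
    unpack_cib(xmlNode *cib_xml)
    {
        pe_working_set_t *data_set = pe_new_working_set();

        if (data_set == NULL) {
            return -ENOMEM;
        }

        /* Same flags crm_rule and crm_ticket set before unpacking */
        data_set->flags |= pe_flag_no_counts | pe_flag_no_compat;

        data_set->input = cib_xml;          /* working set takes over the XML */
        data_set->now = crm_time_new(NULL); /* NULL means "current time" */

        cluster_status(data_set);           /* unpack nodes, resources, tickets */

        pe_free_working_set(data_set);      /* frees the unpacked data and input */
        return pcmk_ok;
    }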
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include gboolean do_force = FALSE; gboolean BE_QUIET = FALSE; const char *ticket_id = NULL; const char *get_attr_name = NULL; const char *attr_name = NULL; const char *attr_value = NULL; const char *attr_id = NULL; const char *set_name = NULL; const char *attr_default = NULL; char ticket_cmd = 'S'; char *xml_file = NULL; int cib_options = cib_sync_call; static pe_ticket_t * find_ticket(const char *ticket_id, pe_working_set_t * data_set) { pe_ticket_t *ticket = NULL; ticket = g_hash_table_lookup(data_set->tickets, ticket_id); return ticket; } static void print_date(time_t time) { int lpc = 0; char date_str[26]; asctime_r(localtime(&time), date_str); for (; lpc < 26; lpc++) { if (date_str[lpc] == '\n') { date_str[lpc] = 0; } } fprintf(stdout, "'%s'", date_str); } static int print_ticket(pe_ticket_t * ticket, gboolean raw, gboolean details) { if (raw) { fprintf(stdout, "%s\n", ticket->id); return pcmk_ok; } fprintf(stdout, "%s\t%s %s", ticket->id, ticket->granted ? "granted" : "revoked", ticket->standby ? "[standby]" : " "); if (details && g_hash_table_size(ticket->state) > 0) { GHashTableIter iter; const char *name = NULL; const char *value = NULL; int lpc = 0; fprintf(stdout, " ("); g_hash_table_iter_init(&iter, ticket->state); while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) { if (lpc > 0) { fprintf(stdout, ", "); } fprintf(stdout, "%s=", name); if (crm_str_eq(name, "last-granted", TRUE) || crm_str_eq(name, "expires", TRUE)) { print_date(crm_parse_int(value, 0)); } else { fprintf(stdout, "%s", value); } lpc++; } fprintf(stdout, ")\n"); } else { if (ticket->last_granted > -1) { fprintf(stdout, " last-granted="); print_date(ticket->last_granted); } fprintf(stdout, "\n"); } return pcmk_ok; } static int print_ticket_list(pe_working_set_t * data_set, gboolean raw, gboolean details) { GHashTableIter iter; pe_ticket_t *ticket = NULL; g_hash_table_iter_init(&iter, data_set->tickets); while (g_hash_table_iter_next(&iter, NULL, (void **)&ticket)) { print_ticket(ticket, raw, details); } return pcmk_ok; } #define XPATH_MAX 1024 static int find_ticket_state(cib_t * the_cib, const char *ticket_id, xmlNode ** ticket_state_xml) { int offset = 0; int rc = pcmk_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; CRM_ASSERT(ticket_state_xml != NULL); *ticket_state_xml = NULL; xpath_string = calloc(1, XPATH_MAX); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", "/cib/status/tickets"); if (ticket_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s[@id=\"%s\"]", XML_CIB_TAG_TICKET_STATE, ticket_id); } CRM_LOG_ASSERT(offset > 0); rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); if (rc != pcmk_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); if (xml_has_children(xml_search)) { if (ticket_id) { fprintf(stdout, "Multiple ticket_states match ticket_id=%s\n", ticket_id); } *ticket_state_xml = xml_search; } else { *ticket_state_xml = xml_search; } bail: free(xpath_string); return rc; } static int find_ticket_constraints(cib_t * the_cib, const char *ticket_id, xmlNode ** ticket_cons_xml) { int offset = 0; int rc = pcmk_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; CRM_ASSERT(ticket_cons_xml != NULL); *ticket_cons_xml = NULL; xpath_string = calloc(1, XPATH_MAX); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, 
"%s/%s", get_object_path(XML_CIB_TAG_CONSTRAINTS), XML_CONS_TAG_RSC_TICKET); if (ticket_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@ticket=\"%s\"]", ticket_id); } CRM_LOG_ASSERT(offset > 0); rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); if (rc != pcmk_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); *ticket_cons_xml = xml_search; bail: free(xpath_string); return rc; } static int dump_ticket_xml(cib_t * the_cib, const char *ticket_id) { int rc = pcmk_ok; xmlNode *state_xml = NULL; rc = find_ticket_state(the_cib, ticket_id, &state_xml); if (state_xml == NULL) { return rc; } fprintf(stdout, "State XML:\n"); if (state_xml) { char *state_xml_str = NULL; state_xml_str = dump_xml_formatted(state_xml); fprintf(stdout, "\n%s\n", crm_str(state_xml_str)); free_xml(state_xml); free(state_xml_str); } return pcmk_ok; } static int dump_constraints(cib_t * the_cib, const char *ticket_id) { int rc = pcmk_ok; xmlNode *cons_xml = NULL; char *cons_xml_str = NULL; rc = find_ticket_constraints(the_cib, ticket_id, &cons_xml); if (cons_xml == NULL) { return rc; } cons_xml_str = dump_xml_formatted(cons_xml); fprintf(stdout, "Constraints XML:\n\n%s\n", crm_str(cons_xml_str)); free_xml(cons_xml); free(cons_xml_str); return pcmk_ok; } static int get_ticket_state_attr(const char *ticket_id, const char *attr_name, const char **attr_value, pe_working_set_t * data_set) { pe_ticket_t *ticket = NULL; CRM_ASSERT(attr_value != NULL); *attr_value = NULL; ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { return -ENXIO; } *attr_value = g_hash_table_lookup(ticket->state, attr_name); if (*attr_value == NULL) { return -ENXIO; } return pcmk_ok; } static gboolean ticket_warning(const char *ticket_id, const char *action) { gboolean rc = FALSE; int offset = 0; static int text_max = 1024; char *warning = NULL; const char *word = NULL; warning = calloc(1, text_max); if (safe_str_eq(action, "grant")) { offset += snprintf(warning + offset, text_max - offset, "This command cannot help you verify whether '%s' has been already granted elsewhere.\n", ticket_id); word = "to"; } else { offset += snprintf(warning + offset, text_max - offset, "Revoking '%s' can trigger the specified 'loss-policy'(s) relating to '%s'.\n\n", ticket_id, ticket_id); offset += snprintf(warning + offset, text_max - offset, "You can check that with:\ncrm_ticket --ticket %s --constraints\n\n", ticket_id); offset += snprintf(warning + offset, text_max - offset, "Otherwise before revoking '%s', you may want to make '%s' standby with:\ncrm_ticket --ticket %s --standby\n\n", ticket_id, ticket_id, ticket_id); word = "from"; } offset += snprintf(warning + offset, text_max - offset, "If you really want to %s '%s' %s this site now, and you know what you are doing,\n", action, ticket_id, word); offset += snprintf(warning + offset, text_max - offset, "please specify --force."); CRM_LOG_ASSERT(offset > 0); fprintf(stdout, "%s\n", warning); free(warning); return rc; } static gboolean allow_modification(const char *ticket_id, GListPtr attr_delete, GHashTable *attr_set) { const char *value = NULL; GListPtr list_iter = NULL; if (do_force) { return TRUE; } if (g_hash_table_lookup_extended(attr_set, "granted", NULL, (gpointer *) & value)) { if (crm_is_true(value)) { ticket_warning(ticket_id, "grant"); return FALSE; } else { ticket_warning(ticket_id, "revoke"); return FALSE; } } for(list_iter = attr_delete; list_iter; list_iter = list_iter->next) { const char 
*key = (const char *)list_iter->data; if (safe_str_eq(key, "granted")) { ticket_warning(ticket_id, "revoke"); return FALSE; } } return TRUE; } static int modify_ticket_state(const char * ticket_id, GListPtr attr_delete, GHashTable * attr_set, cib_t * cib, pe_working_set_t * data_set) { int rc = pcmk_ok; xmlNode *xml_top = NULL; xmlNode *ticket_state_xml = NULL; gboolean found = FALSE; GListPtr list_iter = NULL; GHashTableIter hash_iter; char *key = NULL; char *value = NULL; pe_ticket_t *ticket = NULL; rc = find_ticket_state(cib, ticket_id, &ticket_state_xml); if (rc == pcmk_ok) { crm_debug("Found a match state for ticket: id=%s", ticket_id); xml_top = ticket_state_xml; found = TRUE; } else if (rc != -ENXIO) { return rc; } else if (g_hash_table_size(attr_set) == 0){ return pcmk_ok; } else { xmlNode *xml_obj = NULL; xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS); xml_obj = create_xml_node(xml_top, XML_CIB_TAG_TICKETS); ticket_state_xml = create_xml_node(xml_obj, XML_CIB_TAG_TICKET_STATE); crm_xml_add(ticket_state_xml, XML_ATTR_ID, ticket_id); } for(list_iter = attr_delete; list_iter; list_iter = list_iter->next) { const char *key = (const char *)list_iter->data; xml_remove_prop(ticket_state_xml, key); } ticket = find_ticket(ticket_id, data_set); g_hash_table_iter_init(&hash_iter, attr_set); while (g_hash_table_iter_next(&hash_iter, (gpointer *) & key, (gpointer *) & value)) { crm_xml_add(ticket_state_xml, key, value); if (safe_str_eq(key, "granted") && (ticket == NULL || ticket->granted == FALSE) && crm_is_true(value)) { char *now = crm_ttoa(time(NULL)); crm_xml_add(ticket_state_xml, "last-granted", now); free(now); } } if (found && (attr_delete != NULL)) { crm_log_xml_debug(xml_top, "Replace"); rc = cib->cmds->replace(cib, XML_CIB_TAG_STATUS, ticket_state_xml, cib_options); } else { crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, xml_top, cib_options); } free_xml(xml_top); return rc; } static int delete_ticket_state(const char *ticket_id, cib_t * cib) { xmlNode *ticket_state_xml = NULL; int rc = pcmk_ok; rc = find_ticket_state(cib, ticket_id, &ticket_state_xml); if (rc == -ENXIO) { return pcmk_ok; } else if (rc != pcmk_ok) { return rc; } crm_log_xml_debug(ticket_state_xml, "Delete"); rc = cib->cmds->remove(cib, XML_CIB_TAG_STATUS, ticket_state_xml, cib_options); if (rc == pcmk_ok) { fprintf(stdout, "Cleaned up %s\n", ticket_id); } free_xml(ticket_state_xml); return rc; } static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags { "help", no_argument, NULL, '?', "\t\tThis text", pcmk__option_default }, { "version", no_argument, NULL, '$', "\t\tVersion information", pcmk__option_default }, { "verbose", no_argument, NULL, 'V', "\t\tIncrease debug output", pcmk__option_default }, { "quiet", no_argument, NULL, 'Q', "\t\tPrint only the value on stdout\n", pcmk__option_default }, { "ticket", required_argument, NULL, 't', "\tTicket ID", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nQueries:", pcmk__option_default }, { "info", no_argument, NULL, 'l', "\t\tDisplay the information of ticket(s)", pcmk__option_default }, { "details", no_argument, NULL, 'L', "\t\tDisplay the details of ticket(s)", pcmk__option_default }, { "raw", no_argument, NULL, 'w', "\t\tDisplay the IDs of ticket(s)", pcmk__option_default }, { "query-xml", no_argument, NULL, 'q', "\tQuery the XML of ticket(s)", pcmk__option_default }, { "constraints", no_argument, NULL, 'c', "\tDisplay the rsc_ticket constraints 
that apply to ticket(s)", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nCommands:", pcmk__option_default }, { "grant", no_argument, NULL, 'g', "\t\tGrant a ticket to this cluster site", pcmk__option_default }, { "revoke", no_argument, NULL, 'r', "\t\tRevoke a ticket from this cluster site", pcmk__option_default }, { "standby", no_argument, NULL, 's', "\t\tTell this cluster site this ticket is standby", pcmk__option_default }, { "activate", no_argument, NULL, 'a', "\tTell this cluster site this ticket is active", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:", pcmk__option_default }, { "get-attr", required_argument, NULL, 'G', "\tDisplay the named attribute for a ticket", pcmk__option_default }, { "set-attr", required_argument, NULL, 'S', "\tSet the named attribute for a ticket", pcmk__option_default }, { "delete-attr", required_argument, NULL, 'D', "\tDelete the named attribute for a ticket", pcmk__option_default }, { "cleanup", no_argument, NULL, 'C', "\t\tDelete all state of a ticket at this cluster site", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:", pcmk__option_default }, { "attr-value", required_argument, NULL, 'v', "\tAttribute value to use with -S", pcmk__option_default }, { "default", required_argument, NULL, 'd', "\t(Advanced) Default attribute value to display if none is found " "(for use with -G)", pcmk__option_default }, { "force", no_argument, NULL, 'f', "\t\t(Advanced) Force the action to be performed", pcmk__option_default }, { "xml-file", required_argument, NULL, 'x', NULL, pcmk__option_hidden }, /* legacy options */ { "set-name", required_argument, NULL, 'n', "\t(Advanced) ID of the instance_attributes object to change", pcmk__option_default }, { "nvpair", required_argument, NULL, 'i', "\t(Advanced) ID of the nvpair object to change/delete", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', "\nExamples:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', "Display the info of tickets:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --info", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Display the detailed info of tickets:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --details", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Display the XML of 'ticketA':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --query-xml", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Display the rsc_ticket constraints that apply to 'ticketA':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --constraints", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Grant 'ticketA' to this cluster site:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --grant", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Revoke 'ticketA' from this cluster site:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --revoke", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Make 'ticketA' standby (the cluster site will treat a granted " "'ticketA' as 'standby', and the dependent resources will be " "stopped or demoted gracefully without triggering loss-policies):", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA 
--standby", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Activate 'ticketA' from being standby:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --activate", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Get the value of the 'granted' attribute for 'ticketA':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --get-attr granted", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Set the value of the 'standby' attribute for 'ticketA':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --set-attr standby --attr-value true", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Delete the 'granted' attribute for 'ticketA':", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --delete-attr granted", pcmk__option_example }, { "-spacer-", no_argument, NULL, '-', "Erase the operation history of 'ticketA' at this cluster site:", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', "The cluster site will 'forget' the existing ticket state.", pcmk__option_paragraph }, { "-spacer-", no_argument, NULL, '-', " crm_ticket --ticket ticketA --cleanup", pcmk__option_example }, { 0, 0, 0, 0 } }; int main(int argc, char **argv) { pe_working_set_t *data_set = NULL; xmlNode *cib_xml_copy = NULL; xmlNode *cib_constraints = NULL; cib_t *cib_conn = NULL; crm_exit_t exit_code = CRM_EX_OK; int rc = pcmk_ok; int option_index = 0; int argerr = 0; int flag; guint modified = 0; GListPtr attr_delete = NULL; GHashTable *attr_set = crm_str_table_new(); crm_log_init(NULL, LOG_CRIT, FALSE, FALSE, argc, argv, FALSE); pcmk__set_cli_options(NULL, "| [options]", long_options, "perform tasks related to cluster tickets\n\n" "Allows ticket attributes to be queried, modified " "and deleted.\n"); if (argc < 2) { pcmk__cli_help('?', CRM_EX_USAGE); } while (1) { flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; case 'Q': BE_QUIET = TRUE; break; case 't': ticket_id = optarg; break; case 'l': case 'L': case 'w': case 'q': case 'c': ticket_cmd = flag; break; case 'g': g_hash_table_insert(attr_set, strdup("granted"), strdup("true")); modified++; break; case 'r': g_hash_table_insert(attr_set, strdup("granted"), strdup("false")); modified++; break; case 's': g_hash_table_insert(attr_set, strdup("standby"), strdup("true")); modified++; break; case 'a': g_hash_table_insert(attr_set, strdup("standby"), strdup("false")); modified++; break; case 'G': get_attr_name = optarg; ticket_cmd = flag; break; case 'S': attr_name = optarg; if (attr_name && attr_value) { g_hash_table_insert(attr_set, strdup(attr_name), strdup(attr_value)); attr_name = NULL; attr_value = NULL; modified++; } break; case 'D': attr_delete = g_list_append(attr_delete, optarg); modified++; break; case 'C': ticket_cmd = flag; break; case 'v': attr_value = optarg; if (attr_name && attr_value) { g_hash_table_insert(attr_set, strdup(attr_name), strdup(attr_value)); attr_name = NULL; attr_value = NULL; modified++; } break; case 'd': attr_default = optarg; break; case 'f': do_force = TRUE; break; case 'x': xml_file = strdup(optarg); break; case 'n': set_name = optarg; break; case 'i': attr_id = optarg; break; default: CMD_ERR("Argument code 0%o (%c) is not (?yet?) 
supported", flag, flag); ++argerr; break; } } if (optind < argc && argv[optind] != NULL) { CMD_ERR("non-option ARGV-elements:"); while (optind < argc && argv[optind] != NULL) { CMD_ERR("%s", argv[optind++]); ++argerr; } } if (optind > argc) { ++argerr; } if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } data_set = pe_new_working_set(); if (data_set == NULL) { crm_perror(LOG_CRIT, "Could not allocate working set"); exit_code = CRM_EX_OSERR; goto bail; } set_bit(data_set->flags, pe_flag_no_counts); set_bit(data_set->flags, pe_flag_no_compat); cib_conn = cib_new(); if (cib_conn == NULL) { CMD_ERR("Could not connect to the CIB manager"); exit_code = CRM_EX_DISCONNECT; goto bail; } rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); if (rc != pcmk_ok) { - CMD_ERR("Could not connect to the CIB manager: %s", pcmk_strerror(rc)); + CMD_ERR("Could not connect to CIB: %s", pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto bail; } if (xml_file != NULL) { cib_xml_copy = filename2xml(xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { CMD_ERR("Could not get local CIB: %s", pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); goto bail; } } if (cli_config_update(&cib_xml_copy, NULL, FALSE) == FALSE) { CMD_ERR("Could not update local CIB to latest schema version"); exit_code = CRM_EX_CONFIG; goto bail; } data_set->input = cib_xml_copy; data_set->now = crm_time_new(NULL); cluster_status(data_set); /* For recording the tickets that are referenced in rsc_ticket constraints * but have never been granted yet. */ cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); unpack_constraints(cib_constraints, data_set); if (ticket_cmd == 'l' || ticket_cmd == 'L' || ticket_cmd == 'w') { gboolean raw = FALSE; gboolean details = FALSE; if (ticket_cmd == 'L') { details = TRUE; } else if (ticket_cmd == 'w') { raw = TRUE; } if (ticket_id) { pe_ticket_t *ticket = find_ticket(ticket_id, data_set); if (ticket == NULL) { CMD_ERR("No such ticket '%s'", ticket_id); exit_code = CRM_EX_NOSUCH; goto bail; } rc = print_ticket(ticket, raw, details); } else { rc = print_ticket_list(data_set, raw, details); } if (rc != pcmk_ok) { CMD_ERR("Could not print ticket: %s", pcmk_strerror(rc)); } exit_code = crm_errno2exit(rc); } else if (ticket_cmd == 'q') { rc = dump_ticket_xml(cib_conn, ticket_id); if (rc != pcmk_ok) { CMD_ERR("Could not query ticket XML: %s", pcmk_strerror(rc)); } exit_code = crm_errno2exit(rc); } else if (ticket_cmd == 'c') { rc = dump_constraints(cib_conn, ticket_id); if (rc != pcmk_ok) { CMD_ERR("Could not show ticket constraints: %s", pcmk_strerror(rc)); } exit_code = crm_errno2exit(rc); } else if (ticket_cmd == 'G') { const char *value = NULL; if (ticket_id == NULL) { CMD_ERR("Must supply ticket ID with -t"); exit_code = CRM_EX_NOSUCH; goto bail; } rc = get_ticket_state_attr(ticket_id, get_attr_name, &value, data_set); if (rc == pcmk_ok) { fprintf(stdout, "%s\n", value); } else if (rc == -ENXIO && attr_default) { fprintf(stdout, "%s\n", attr_default); rc = pcmk_ok; } exit_code = crm_errno2exit(rc); } else if (ticket_cmd == 'C') { if (ticket_id == NULL) { CMD_ERR("Must supply ticket ID with -t"); exit_code = CRM_EX_USAGE; goto bail; } if (do_force == FALSE) { pe_ticket_t *ticket = NULL; ticket = find_ticket(ticket_id, data_set); if (ticket == NULL) { CMD_ERR("No such ticket '%s'", ticket_id); exit_code = CRM_EX_NOSUCH; goto bail; } if (ticket->granted) { ticket_warning(ticket_id, "revoke"); 
exit_code = CRM_EX_INSUFFICIENT_PRIV; goto bail; } } rc = delete_ticket_state(ticket_id, cib_conn); if (rc != pcmk_ok) { CMD_ERR("Could not clean up ticket: %s", pcmk_strerror(rc)); } exit_code = crm_errno2exit(rc); } else if (modified) { if (ticket_id == NULL) { CMD_ERR("Must supply ticket ID with -t"); exit_code = CRM_EX_USAGE; goto bail; } if (attr_value && (attr_name == NULL || strlen(attr_name) == 0)) { CMD_ERR("Must supply attribute name with -S for -v %s", attr_value); exit_code = CRM_EX_USAGE; goto bail; } if (attr_name && (attr_value == NULL || strlen(attr_value) == 0)) { CMD_ERR("Must supply attribute value with -v for -S %s", attr_name); exit_code = CRM_EX_USAGE; goto bail; } if (allow_modification(ticket_id, attr_delete, attr_set) == FALSE) { CMD_ERR("Ticket modification not allowed"); exit_code = CRM_EX_INSUFFICIENT_PRIV; goto bail; } rc = modify_ticket_state(ticket_id, attr_delete, attr_set, cib_conn, data_set); if (rc != pcmk_ok) { CMD_ERR("Could not modify ticket: %s", pcmk_strerror(rc)); } exit_code = crm_errno2exit(rc); } else if (ticket_cmd == 'S') { /* Correct usage was handled in the "if (modified)" block above, so * this is just for reporting usage errors */ if (attr_name == NULL || strlen(attr_name) == 0) { // We only get here if ticket_cmd was left as default CMD_ERR("Must supply a command"); exit_code = CRM_EX_USAGE; goto bail; } if (ticket_id == NULL) { CMD_ERR("Must supply ticket ID with -t"); exit_code = CRM_EX_USAGE; goto bail; } if (attr_value == NULL || strlen(attr_value) == 0) { CMD_ERR("Must supply value with -v for -S %s", attr_name); exit_code = CRM_EX_USAGE; goto bail; } } else { CMD_ERR("Unknown command: %c", ticket_cmd); exit_code = CRM_EX_USAGE; } bail: if (attr_set) { g_hash_table_destroy(attr_set); } attr_set = NULL; if (attr_delete) { g_list_free(attr_delete); } attr_delete = NULL; pe_free_working_set(data_set); data_set = NULL; if (cib_conn != NULL) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } if (rc == -pcmk_err_no_quorum) { CMD_ERR("Use --force to ignore quorum"); } crm_exit(exit_code); } diff --git a/tools/crmadmin.c b/tools/crmadmin.c index b2c48413ca..c58de59ed7 100644 --- a/tools/crmadmin.c +++ b/tools/crmadmin.c @@ -1,530 +1,532 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
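crm_shadow's help text above describes the sandbox as an environment in which the other configuration tools work offline, and crm_simulate's setup_input() implements the same idea by exporting CIB_file before creating its CIB connection. The sketch below is illustrative only and not part of the patch; the path argument is a placeholder, and it assumes only what the patched tools already rely on, namely that libcib honours the CIB_file environment variable.

    /* Illustrative sketch, not part of the patch: point libcib clients at a
     * scratch CIB file, as crm_simulate's setup_input() does and as the
     * CIB_shadow mechanism in crm_shadow does for child shells. */
    #include <stdio.h>
    #include <stdlib.h>

    static int
    use_scratch_cib(const char *path)
    {
        /* libcib clients created in this process, or launched from it, will
         * read and write this file instead of contacting the live CIB. */
        if (setenv("CIB_file", path, 1) != 0) {
            perror("setenv");
            return -1;
        }
        return 0;
    }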
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static int message_timer_id = -1;
static int message_timeout_ms = 30 * 1000;

static GMainLoop *mainloop = NULL;
static crm_ipc_t *crmd_channel = NULL;
static char *admin_uuid = NULL;

gboolean do_init(void);
int do_work(void);
void crmadmin_ipc_connection_destroy(gpointer user_data);
int admin_msg_callback(const char *buffer, ssize_t length, gpointer userdata);
int do_find_node_list(xmlNode * xml_node);
gboolean admin_message_timeout(gpointer data);

static gboolean BE_VERBOSE = FALSE;
static int expected_responses = 1;
static gboolean BASH_EXPORT = FALSE;
static gboolean DO_HEALTH = FALSE;
static gboolean DO_RESET = FALSE;
static gboolean DO_RESOURCE = FALSE;
static gboolean DO_ELECT_DC = FALSE;
static gboolean DO_WHOIS_DC = FALSE;
static gboolean DO_NODE_LIST = FALSE;
static gboolean BE_SILENT = FALSE;
static gboolean DO_RESOURCE_LIST = FALSE;
static const char *crmd_operation = NULL;
static char *dest_node = NULL;
static crm_exit_t exit_code = CRM_EX_OK;
static const char *sys_to = NULL;

static pcmk__cli_option_t long_options[] = {
    // long option, argument type, storage, short option, description, flags
    { "help", no_argument, NULL, '?',
      "\tThis text", pcmk__option_default },
    { "version", no_argument, NULL, '$',
      "\tVersion information", pcmk__option_default },
    { "quiet", no_argument, NULL, 'q',
      "\tDisplay only the essential query information", pcmk__option_default },
    { "verbose", no_argument, NULL, 'V',
      "\tIncrease debug output", pcmk__option_default },
    { "-spacer-", no_argument, NULL, '-',
      "\nCommands:", pcmk__option_default },

    /* daemon options */
    { "status", required_argument, NULL, 'S',
      "Display the status of the specified node.", pcmk__option_default },
    { "-spacer-", no_argument, NULL, '-',
      "\n\tResult is state of node's internal finite state machine, which "
      "can be useful for debugging\n", pcmk__option_default },
    { "dc_lookup", no_argument, NULL, 'D',
      "Display the uname of the node co-ordinating the cluster.",
      pcmk__option_default },
    { "-spacer-", no_argument, NULL, '-',
      "\n\tThis is an internal detail rarely useful to administrators "
      "except when deciding on which node to examine the logs.\n",
      pcmk__option_default },
    { "nodes", no_argument, NULL, 'N',
      "\tDisplay the uname of all member nodes", pcmk__option_default },
    { "election", no_argument, NULL, 'E',
      "(Advanced) Start an election for the cluster co-ordinator",
      pcmk__option_default },
    { "kill", required_argument, NULL, 'K',
      "(Advanced) Stop controller (not rest of cluster stack) on "
      "specified node", pcmk__option_default },
    { "health", no_argument, NULL, 'H',
      NULL, pcmk__option_hidden },

    { "-spacer-", no_argument, NULL, '-',
      "\nAdditional Options:", pcmk__option_default },
    { XML_ATTR_TIMEOUT, required_argument, NULL, 't',
      "Time (in milliseconds) to wait before declaring the operation failed",
      pcmk__option_default },
    { "bash-export", no_argument, NULL, 'B',
      "Create Bash export entries of the form 'export uname=uuid'\n",
      pcmk__option_default },

    { "-spacer-", no_argument, NULL, '-',
      "Notes:", pcmk__option_default },
    { "-spacer-", no_argument, NULL, '-',
      " The -K and -E commands are rarely used and may be removed in "
      "future versions.", pcmk__option_default },

    { 0, 0, 0, 0 }
};

int
main(int argc, char **argv)
{
    int option_index = 0;
    int argerr = 0;
    int flag;

    crm_log_cli_init("crmadmin");
    pcmk__set_cli_options(NULL, " [options]", long_options,
                          "query and manage the Pacemaker controller");
    if (argc < 2) {
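        // no arguments were given; show usage and exit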
        pcmk__cli_help('?', CRM_EX_USAGE);
    }

    while (1) {
        flag = pcmk__next_cli_option(argc, argv, &option_index, NULL);
        if (flag == -1)
            break;

        switch (flag) {
            case 'V':
                BE_VERBOSE = TRUE;
                crm_bump_log_level(argc, argv);
                break;
            case 't':
                message_timeout_ms = atoi(optarg);
                if (message_timeout_ms < 1) {
                    message_timeout_ms = 30 * 1000;
                }
                break;

            case '$':
            case '?':
                pcmk__cli_help(flag, CRM_EX_OK);
                break;
            case 'D':
                DO_WHOIS_DC = TRUE;
                break;
            case 'B':
                BASH_EXPORT = TRUE;
                break;
            case 'K':
                DO_RESET = TRUE;
                crm_trace("Option %c => %s", flag, optarg);
                dest_node = strdup(optarg);
                crmd_operation = CRM_OP_LOCAL_SHUTDOWN;
                break;
            case 'q':
                BE_SILENT = TRUE;
                break;
            case 'S':
                DO_HEALTH = TRUE;
                crm_trace("Option %c => %s", flag, optarg);
                dest_node = strdup(optarg);
                break;
            case 'E':
                DO_ELECT_DC = TRUE;
                break;
            case 'N':
                DO_NODE_LIST = TRUE;
                break;
            case 'H':
                DO_HEALTH = TRUE;
                break;
            default:
                printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
                ++argerr;
                break;
        }
    }

    if (optind < argc) {
        printf("non-option ARGV-elements: ");
        while (optind < argc)
            printf("%s ", argv[optind++]);
        printf("\n");
    }

    if (optind > argc) {
        ++argerr;
    }

    if (argerr) {
        pcmk__cli_help('?', CRM_EX_USAGE);
    }

    if (do_init()) {
        int res = 0;

        res = do_work();
        if (res > 0) {
            /* wait for the reply by creating a mainloop and running it until
             * the callbacks are invoked...
             */
            mainloop = g_main_loop_new(NULL, FALSE);
            crm_trace("Waiting for %d replies from the local CRM", expected_responses);
            message_timer_id = g_timeout_add(message_timeout_ms, admin_message_timeout, NULL);
            g_main_loop_run(mainloop);

        } else if (res < 0) {
            crm_err("No message to send");
            exit_code = CRM_EX_ERROR;
        }
    } else {
        crm_warn("Init failed, could not perform requested operations");
        exit_code = CRM_EX_UNAVAILABLE;
    }

    crm_trace("%s exiting normally", crm_system_name);
    return exit_code;
}

int
do_work(void)
{
    int ret = 1;

    /* construct the request */
    xmlNode *msg_data = NULL;
    gboolean all_is_good = TRUE;

    if (DO_HEALTH == TRUE) {
        crm_trace("Querying the system");

        sys_to = CRM_SYSTEM_DC;

        if (dest_node != NULL) {
            sys_to = CRM_SYSTEM_CRMD;
            crmd_operation = CRM_OP_PING;

            if (BE_VERBOSE) {
                expected_responses = 1;
            }

        } else {
            crm_info("Cluster-wide health not available yet");
            all_is_good = FALSE;
        }

    } else if (DO_ELECT_DC) {
        /* tell the local node to initiate an election */
        dest_node = NULL;
        sys_to = CRM_SYSTEM_CRMD;
        crmd_operation = CRM_OP_VOTE;
        ret = 0;                /* no return message */

    } else if (DO_WHOIS_DC) {
        dest_node = NULL;
        sys_to = CRM_SYSTEM_DC;
        crmd_operation = CRM_OP_PING;

    } else if (DO_NODE_LIST) {
        cib_t *the_cib = cib_new();
        xmlNode *output = NULL;

        int rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command);

        if (rc != pcmk_ok) {
+            fprintf(stderr, "Could not connect to CIB: %s\n",
+                    pcmk_strerror(rc));
            return -1;
        }

        rc = the_cib->cmds->query(the_cib, NULL, &output,
                                  cib_scope_local | cib_sync_call);
        if(rc == pcmk_ok) {
            do_find_node_list(output);
            free_xml(output);
        }
        the_cib->cmds->signoff(the_cib);
        crm_exit(crm_errno2exit(rc));

    } else if (DO_RESET) {
        /* tell dest_node to initiate the shutdown procedure
         *
         * if dest_node is NULL, the request will be sent to the
         * local node
         */
        sys_to = CRM_SYSTEM_CRMD;
        ret = 0;                /* no return message */

    } else {
        crm_err("Unknown options");
        all_is_good = FALSE;
    }

    if (all_is_good == FALSE) {
        crm_err("Creation of request failed. No message to send");
        return -1;
    }
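    /* A valid request was built above; it is delivered to the controller
     * (or the DC) over the IPC connection that do_init() established.
     */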
No message to send"); return -1; } /* send it */ if (crmd_channel == NULL) { crm_err("The IPC connection is not valid, cannot send anything"); return -1; } if (sys_to == NULL) { if (dest_node != NULL) { sys_to = CRM_SYSTEM_CRMD; } else { sys_to = CRM_SYSTEM_DC; } } { xmlNode *cmd = create_request(crmd_operation, msg_data, dest_node, sys_to, crm_system_name, admin_uuid); crm_ipc_send(crmd_channel, cmd, 0, 0, NULL); free_xml(cmd); } return ret; } void crmadmin_ipc_connection_destroy(gpointer user_data) { crm_err("Connection to controller was terminated"); if (mainloop) { g_main_loop_quit(mainloop); } else { crm_exit(CRM_EX_DISCONNECT); } } struct ipc_client_callbacks crm_callbacks = { .dispatch = admin_msg_callback, .destroy = crmadmin_ipc_connection_destroy }; gboolean do_init(void) { mainloop_io_t *source = mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks); admin_uuid = pcmk__getpid_s(); crmd_channel = mainloop_get_ipc_client(source); if (DO_RESOURCE || DO_RESOURCE_LIST || DO_NODE_LIST) { return TRUE; } else if (crmd_channel != NULL) { xmlNode *xml = create_hello_message(admin_uuid, crm_system_name, "0", "1"); crm_ipc_send(crmd_channel, xml, 0, 0, NULL); return TRUE; } return FALSE; } static bool validate_crm_message(xmlNode * msg, const char *sys, const char *uuid, const char *msg_type) { const char *type = NULL; const char *crm_msg_reference = NULL; if (msg == NULL) { return FALSE; } type = crm_element_value(msg, F_CRM_MSG_TYPE); crm_msg_reference = crm_element_value(msg, XML_ATTR_REFERENCE); if (type == NULL) { crm_info("No message type defined."); return FALSE; } else if (msg_type != NULL && strcasecmp(msg_type, type) != 0) { crm_info("Expecting a (%s) message but received a (%s).", msg_type, type); return FALSE; } if (crm_msg_reference == NULL) { crm_info("No message crm_msg_reference defined."); return FALSE; } return TRUE; } int admin_msg_callback(const char *buffer, ssize_t length, gpointer userdata) { static int received_responses = 0; xmlNode *xml = string2xml(buffer); received_responses++; g_source_remove(message_timer_id); crm_log_xml_trace(xml, "ipc"); if (xml == NULL) { crm_info("XML in IPC message was not valid... " "discarding."); } else if (validate_crm_message(xml, crm_system_name, admin_uuid, XML_ATTR_RESPONSE) == FALSE) { crm_trace("Message was not a CRM response. Discarding."); } else if (DO_HEALTH) { xmlNode *data = get_message_xml(xml, F_CRM_DATA); const char *state = crm_element_value(data, XML_PING_ATTR_CRMDSTATE); printf("Status of %s@%s: %s (%s)\n", crm_element_value(data, XML_PING_ATTR_SYSFROM), crm_element_value(xml, F_CRM_HOST_FROM), state, crm_element_value(data, XML_PING_ATTR_STATUS)); if (BE_SILENT && state != NULL) { fprintf(stderr, "%s\n", state); } } else if (DO_WHOIS_DC) { const char *dc = crm_element_value(xml, F_CRM_HOST_FROM); printf("Designated Controller is: %s\n", dc); if (BE_SILENT && dc != NULL) { fprintf(stderr, "%s\n", dc); } crm_exit(CRM_EX_OK); } free_xml(xml); if (received_responses >= expected_responses) { crm_trace("Received expected number (%d) of replies, exiting normally", expected_responses); crm_exit(CRM_EX_OK); } message_timer_id = g_timeout_add(message_timeout_ms, admin_message_timeout, NULL); return 0; } gboolean admin_message_timeout(gpointer data) { fprintf(stderr, "No messages received in %d seconds.. 
aborting\n", (int)message_timeout_ms / 1000); crm_err("No messages received in %d seconds", (int)message_timeout_ms / 1000); exit_code = CRM_EX_TIMEOUT; g_main_loop_quit(mainloop); return FALSE; } int do_find_node_list(xmlNode * xml_node) { int found = 0; xmlNode *node = NULL; xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); for (node = __xml_first_child_element(nodes); node != NULL; node = __xml_next_element(node)) { if (crm_str_eq((const char *)node->name, XML_CIB_TAG_NODE, TRUE)) { if (BASH_EXPORT) { printf("export %s=%s\n", crm_element_value(node, XML_ATTR_UNAME), crm_element_value(node, XML_ATTR_ID)); } else { printf("%s node: %s (%s)\n", crm_element_value(node, XML_ATTR_TYPE), crm_element_value(node, XML_ATTR_UNAME), crm_element_value(node, XML_ATTR_ID)); } found++; } } if (found == 0) { printf("NO nodes configured\n"); } return found; }