diff --git a/.travis.yml b/.travis.yml index 9db943b3f5..3266743bbb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,104 +1,101 @@ # Copyright 2012-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # Control file for the Travis autobuilder # https://docs.travis-ci.com/user/customizing-the-build/ language: c # We build with both gcc and clang. If MAINT_EXTRA=1 (gcc only), the # schema regression tests will additionally be run. matrix: include: - compiler: gcc env: MAINT_EXTRA=1 - compiler: clang env: MAINT_EXTRA=0 cache: directories: - xml/.relaxng.org # sudo add-apt-repository ppa:hotot-team before_install: - sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main" - sudo apt-get update -qq # To switch to Travis-CI's containerized (non-sudo) architecture, # all our dependencies need to be on Travis's whitelist: # https://github.com/travis-ci/apt-package-whitelist # # The only ones that aren't already are: # - cluster-glue-dev: see open issue: # https://github.com/travis-ci/apt-package-whitelist/issues/2936 # - resource-agents: see open issue: # https://github.com/travis-ci/apt-package-whitelist/issues/4261 # - libdbus-1-dev: see multiple open issues: # https://github.com/travis-ci/apt-package-whitelist/issues?utf8=%E2%9C%93&q=is%3Aissue+libdbus+-1-dev # (a workaround is to install libdbus-glib-1-dev, which depends on it and is whitelisted) install: - sudo apt-get install -qq automake autoconf libtool python python-dev libbz2-dev libdbus-1-dev libglib2.0-dev libgnutls-dev libltdl-dev libncurses5-dev libpam0g-dev libxml2-dev libxslt1-dev uuid-dev libqb-dev libcfg-dev libcmap-dev libcorosync-common-dev libcpg-dev libquorum-dev libsam-dev libtotem-pg-dev libvotequorum-dev cluster-glue-dev resource-agents - test $MAINT_EXTRA -eq 0 || sudo apt-get install -qq libxml2-utils xsltproc before_script: # some tests (e.g. cts-exec-helper) require actual system-wide credentials - - sed -e 's|^\(CRM_DAEMON_USER=\).*$|\1nobody|' - -e 's|^\(CRM_DAEMON_GROUP=\).*$|\1nobody|' - -i -- configure.ac - ./autogen.sh - - ./configure + - ./configure --with-daemon-user=nobody --with-daemon-group=nobody script: # Create directories needed by commands used by regression tests - sudo make install-exec-local || true - make - ./cts/cts-regression -V cli scheduler exec - test $MAINT_EXTRA -eq 0 || { { echo 'looking for presence of control characters...'; { git ls-files | xargs grep -Ensv "^([^[:cntrl:]]*|$(printf '\t'))*$"||:; } 2>/dev/null | { ! 
grep -Ev '^Binary file' && echo 'ALL OK'; }; } && ( cd xml; ./regression.sh && ./regression.sh -B && ./regression.sh -S && { schemas=; for schema in *.rng; do case ${schema} in *cibtr*);; *)schemas="${schemas} ${schema}";; esac; done; test -s .relaxng.org/relaxng.rng 2>/dev/null || curl --create-dirs -SsLo .relaxng.org/relaxng.rng 'https://raw.githubusercontent.com/relaxng/relaxng.org/master/relaxng.rng'; xmllint --noout --relaxng .relaxng.org/relaxng.rng ${schemas}; } ); } #after_script: #after_success: after_failure: - lsb_release -a - sudo cat /etc/apt/sources.list - whoami - env | sort - cat include/config.h notifications: irc: "irc.freenode.org#pcmk" # email: # recipients: # - developers@clusterlabs.org # whitelist branches: only: - master - "1.1" - "2.0" diff --git a/GNUmakefile b/GNUmakefile index 8f1049a620..2e58226932 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,417 +1,416 @@ # # Copyright 2008-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # default: build .PHONY: default +# The toplevel "clean" targets are generated from Makefile.am, not this file. +# We can't use autotools' CLEANFILES, clean-local, etc. here. Instead, we +# define this target, which Makefile.am can use as a dependency of clean-local. +EXTRA_CLEAN_TARGETS = ancillary-clean + -include Makefile # The main purpose of this GNUmakefile is that its targets can be invoked # without having to call autogen.sh and configure first. That means automake # variables may or may not be defined. Here, we use the current working # directory if a relevant variable hasn't been defined. # # The idea is to keep generated artifacts in the build tree, in case a VPATH # build is in use, but in practice it would be difficult to make the targets # here usable from a different location than the source tree. abs_srcdir ?= $(shell pwd) abs_builddir ?= $(shell pwd) PACKAGE ?= pacemaker # Definitions that specify what various targets will apply to COMMIT ?= HEAD TAG ?= $(shell T=$$(git describe --all '$(COMMIT)' 2>/dev/null | sed -n 's|tags/\(.*\)|\1|p'); \ test -n "$${T}" && echo "$${T}" \ || git log --pretty=format:%H -n 1 '$(COMMIT)' 2>/dev/null || echo DIST) lparen = ( rparen = ) SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*|DIST$(rparen) echo '$(TAG)' | cut -c11-;; \ *$(rparen) git log --pretty=format:%h -n 1 '$(TAG)';; esac) SHORTTAG_ABBREV = $(shell printf %s '$(SHORTTAG)' | wc -c) LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1) ifneq ($(origin VERSION), undefined) LAST_RELEASE ?= Pacemaker-$(VERSION) else LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1) endif NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}') # This Makefile can create 2 types of distributions: # # - "make dist" is automake's native functionality, based on the various # dist/nodist make variables; it always uses the current sources # # - "make export" is a custom target based on git archive and relevant entries # from .gitattributes; it defaults to current sources but can use any git tag # # Both types use the TARFILE name for the result, though they generate # different contents. 
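# Illustrative only (the tag named here is an example, not part of this
# change): a release tarball for a specific tag could be produced with the
# "export" target, e.g.
#
#   make export TAG=Pacemaker-2.0.1
#
# which, per the TARFILE/SHORTTAG definitions below, would run "git archive"
# and leave pacemaker-2.0.1.tar.gz in the build directory.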
distdir = $(PACKAGE)-$(SHORTTAG)
TARFILE = $(abs_builddir)/$(PACKAGE)-$(SHORTTAG).tar.gz

.PHONY: init
init:
	test -e $(top_srcdir)/configure || ./autogen.sh
	test -e $(abs_builddir)/Makefile || $(abs_builddir)/configure

.PHONY: build
build: init
	$(MAKE) $(AM_MAKEFLAGS) core

export:
	if [ ! -f "$(TARFILE)" ]; then \
	    if [ $(TAG) = dirty ]; then \
		git commit -m "DO-NOT-PUSH" -a; \
		git archive --prefix=$(distdir)/ -o "$(TARFILE)" HEAD^{tree}; \
		git reset --mixed HEAD^; \
	    else \
		git archive --prefix=$(distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \
	    fi; \
	    echo "`date`: Rebuilt $(TARFILE)"; \
	else \
	    echo "`date`: Using existing tarball: $(TARFILE)"; \
	fi

-export-clean:
-	-rm -f $(abs_builddir)/$(PACKAGE)-*.tar.gz
-
## RPM-related targets

# Where to put RPM artifacts; possible values:
#
# - toplevel (default): RPM sources, spec, and source rpm in top-level build
#   directory (everything else uses the usual defaults)
#
# - subtree: RPM sources (i.e. TARFILE) in top-level build directory,
#   everything else in dedicated "rpmbuild" subdirectory of build tree
RPMDEST ?= toplevel

RPM_SPEC_DIR_toplevel = $(abs_builddir)
RPM_SRCRPM_DIR_toplevel = $(abs_builddir)
RPM_OPTS_toplevel = --define "_sourcedir $(abs_builddir)" \
    --define "_specdir $(RPM_SPEC_DIR_toplevel)" \
    --define "_srcrpmdir $(RPM_SRCRPM_DIR_toplevel)"

RPM_SPEC_DIR_subtree = $(abs_builddir)/rpm/SPECS
RPM_SRCRPM_DIR_subtree = $(abs_builddir)/rpm/SRPMS
RPM_OPTS_subtree = --define "_sourcedir $(abs_builddir)" \
    --define "_topdir $(abs_builddir)/rpm"

RPM_SPEC_DIR = $(RPM_SPEC_DIR_$(RPMDEST))
RPM_SRCRPM_DIR = $(RPM_SRCRPM_DIR_$(RPMDEST))
RPM_OPTS = $(RPM_OPTS_$(RPMDEST))

WITH ?= --without doc

BUILD_COUNTER ?= build.counter
LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER))
COUNT = $(shell expr 1 + $(LAST_COUNT))
SPECVERSION ?= $(COUNT)

MOCK_DIR = $(abs_builddir)/mock
MOCK_OPTIONS ?= --resultdir=$(MOCK_DIR) --no-cleanup-after

F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora})
ARCH ?= $(shell test ! -e /etc/fedora-release && uname -m; test -e /etc/fedora-release && rpm --eval %{_arch})
MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH))

# rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros
#
# Unfortunately, at least recent versions of rpm do not support the mentioned
# switch. To work around this, we can emulate the mechanism that rpm uses
# internally: unfold the flags into the respective macro definitions:
#
#   --with[out] FOO  ->  --define "_with[out]_FOO --with[out]-FOO"
#
# $(1) ... WITH string (e.g., --with pre_release --without doc)
# $(2) ... options following the initial "rpmbuild" in the command
# $(3) ... final arguments determined with $2 (e.g., pacemaker.spec)
#
# Note that if $(3) is a specfile, extra care is taken so as to reflect
# pcmkversion correctly (using in-place modification).
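# A minimal sketch of the flag unfolding described above (hypothetical
# invocation, mirroring the "make WITH=... rpm" example further below):
#
#   make WITH="--with pre_release --without doc" rpm
#
# has this wrapper invoke rpmbuild with roughly
#
#   rpmbuild ... --define "_with_pre_release --with-pre_release" \
#                --define "_without_doc --without-doc" ... pacemaker.spec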
# # Also note that both ways to specify long option with an argument # (i.e., what getopt and, importantly, rpm itself support) can be used: # # --with FOO # --with=FOO rpmbuild-with = \ WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \ CMD='rpmbuild $(2)'; PREREL=0; \ eval set -- "$${WITH}"; \ while true; do \ case "$$1" in \ --with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \ [ "$$2" != pre_release ] || PREREL=1; shift 2;; \ --without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \ [ "$$2" != pre_release ] || PREREL=0; shift 2;; \ --) shift ; break ;; \ *) echo "cannot parse WITH: $$1"; exit 1;; \ esac; \ done; \ case "$(3)" in \ *.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \ && sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \ || sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \ esac; \ CMD="$${CMD} $(3)"; \ eval "$${CMD}" $(RPM_SPEC_DIR)/$(PACKAGE).spec: rpm/pacemaker.spec.in $(AM_V_at)$(MKDIR_P) $(RPM_SPEC_DIR) # might not exist in VPATH build $(AM_V_GEN)if [ x != x"`git ls-files -m rpm/pacemaker.spec.in 2>/dev/null`" ]; then \ cat $(abs_srcdir)/rpm/pacemaker.spec.in; \ elif git cat-file -e $(TAG):rpm/pacemaker.spec.in 2>/dev/null; then \ git show $(TAG):rpm/pacemaker.spec.in; \ elif git cat-file -e $(TAG):pacemaker.spec.in 2>/dev/null; then \ git show $(TAG):pacemaker.spec.in; \ else \ cat $(abs_srcdir)/rpm/pacemaker.spec.in; \ fi | sed \ -e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \ -e 's/global\ commit\ .*/global\ commit\ $(SHORTTAG)/' \ -e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SHORTTAG_ABBREV)/' \ -e "s/PACKAGE_DATE/$$(date +'%a %b %d %Y')/" \ -e "s/PACKAGE_VERSION/$$(git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)/" \ > "$@" .PHONY: $(PACKAGE).spec $(PACKAGE).spec: $(RPM_SPEC_DIR)/$(PACKAGE).spec .PHONY: spec-clean spec-clean: -rm -f $(RPM_SPEC_DIR)/$(PACKAGE).spec .PHONY: srpm srpm: export srpm-clean $(RPM_SPEC_DIR)/$(PACKAGE).spec if [ -e $(BUILD_COUNTER) ]; then \ echo $(COUNT) > $(BUILD_COUNTER); \ fi $(call rpmbuild-with,$(WITH),-bs $(RPM_OPTS),$(RPM_SPEC_DIR)/$(PACKAGE).spec) .PHONY: srpm-clean srpm-clean: -rm -f $(RPM_SRCRPM_DIR)/*.src.rpm .PHONY: chroot chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG) @echo "Done" .PHONY: mock-next mock-next: $(MAKE) $(AM_MAKEFLAGS) F=$(shell expr 1 + $(F)) mock .PHONY: mock-rawhide mock-rawhide: $(MAKE) $(AM_MAKEFLAGS) F=rawhide mock mock-install-%: @echo "Installing packages" mock --root=$* $(MOCK_OPTIONS) --install $(MOCK_DIR)/*.rpm \ vi sudo valgrind lcov gdb fence-agents psmisc .PHONY: mock-install mock-install: mock-install-$(MOCK_CFG) @echo "Done" .PHONY: mock-sh mock-sh: mock-sh-$(MOCK_CFG) @echo "Done" mock-sh-%: @echo "Connecting" mock --root=$* $(MOCK_OPTIONS) --shell @echo "Done" mock-%: srpm mock-clean mock $(MOCK_OPTIONS) --root=$* --no-cleanup-after --rebuild \ $(WITH) $(RPM_SRCRPM_DIR)/*.src.rpm .PHONY: mock mock: mock-$(MOCK_CFG) @echo "Done" .PHONY: dirty dirty: $(MAKE) $(AM_MAKEFLAGS) TAG=dirty mock .PHONY: mock-clean mock-clean: -rm -rf $(MOCK_DIR) .PHONY: rpm-dep rpm-dep: $(RPM_SPEC_DIR)/$(PACKAGE).spec sudo yum-builddep $(PACKAGE).spec # e.g. 
make WITH="--with pre_release" rpm .PHONY: rpm rpm: srpm @echo To create custom builds, edit the flags and options in $(PACKAGE).spec first $(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_SRCRPM_DIR)/*.src.rpm) .PHONY: rpmlint rpmlint: $(RPM_SPEC_DIR)/$(PACKAGE).spec rpmlint -f rpm/rpmlintrc "$<" .PHONY: release release: $(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RELEASE) rpm .PHONY: rc rc: $(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RC) rpm ## Static analysis via coverity # Aggressiveness (low, medium, or high) COVLEVEL ?= low # Generated outputs COVERITY_DIR = $(abs_builddir)/coverity-$(TAG) COVTAR = $(abs_builddir)/$(PACKAGE)-coverity-$(TAG).tgz COVEMACS = $(abs_builddir)/$(TAG).coverity COVHTML = $(COVERITY_DIR)/output/errors # Coverity outputs are phony so they get rebuilt every invocation .PHONY: $(COVERITY_DIR) $(COVERITY_DIR): init core-clean coverity-clean $(AM_V_GEN)cov-build --dir "$@" $(MAKE) $(AM_MAKEFLAGS) core # Public coverity instance .PHONY: $(COVTAR) $(COVTAR): $(COVERITY_DIR) $(AM_V_GEN)tar czf "$@" --transform="s@.*$(TAG)@cov-int@" "$<" .PHONY: coverity coverity: $(COVTAR) @echo "Now go to https://scan.coverity.com/users/sign_in and upload:" @echo " $(COVTAR)" @echo "then make core-clean coverity-clean" # Licensed coverity instance # # The prerequisites are a little hacky; rather than actually required, some # of them are designed so that things execute in the proper order (which is # not the same as GNU make's order-only prerequisites). .PHONY: coverity-analyze coverity-analyze: $(COVERITY_DIR) @echo "" @echo "Analyzing (waiting for coverity license if necessary) ..." cov-analyze --dir "$<" --wait-for-license --security \ --aggressiveness-level "$(COVLEVEL)" .PHONY: $(COVEMACS) $(COVEMACS): coverity-analyze $(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@" .PHONY: $(COVHTML) $(COVHTML): $(COVEMACS) $(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@" .PHONY: coverity-corp coverity-corp: $(COVHTML) $(MAKE) $(AM_MAKEFLAGS) core-clean @echo "Done. 
See:" @echo " file://$(abs_builddir)/$(COVHTML)/index.html" @echo "When no longer needed, make coverity-clean" # Remove all outputs regardless of tag .PHONY: coverity-clean coverity-clean: -rm -rf "$(abs_builddir)"/coverity-* \ "$(abs_builddir)"/$(PACKAGE)-coverity-*.tgz \ "$(abs_builddir)"/*.coverity ## Change log generation summary: @printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)" @printf "\n- Changesets: `git log --pretty=oneline $(LAST_RELEASE)..HEAD | wc -l`" @printf "\n- Diff: " @git diff $(LAST_RELEASE)..HEAD --shortstat include lib daemons tools xml rc-changes: @$(MAKE) $(AM_MAKEFLAGS) NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes changes: summary @printf "\n- Features added since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf @printf "\n- Changes since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \ | grep -e High: -e Fix: -e Bug | sed \ -e 's@\(Fix\|High\|Bug\):@@' \ -e 's@\(cib\|pacemaker-based\|based\):@CIB:@' \ -e 's@\(crmd\|pacemaker-controld\|controld\):@controller:@' \ -e 's@\(lrmd\|pacemaker-execd\|execd\):@executor:@' \ -e 's@\(Fencing\|stonithd\|stonith\|pacemaker-fenced\|fenced\):@fencing:@' \ -e 's@\(PE\|pengine\|pacemaker-schedulerd\|schedulerd\):@scheduler:@' \ | sort -uf authors: git log $(LAST_RELEASE)..$(COMMIT) --format='%an' | sort -u changelog: @$(MAKE) $(AM_MAKEFLAGS) changes > ChangeLog @printf "\n">> ChangeLog git show $(LAST_RELEASE):ChangeLog >> ChangeLog DO_NOT_INDENT = lib/gnu daemons/controld/controld_fsa.h indent: find . -name "*.[ch]" -exec ./p-indent \{\} \; git co HEAD $(DO_NOT_INDENT) rel-tags: tags find . -name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \; CLANG_analyzer = $(shell which scan-build) CLANG_checkers = # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} # --inconclusive --std=posix CPPCHECK_ARGS ?= cppcheck: cppcheck $(CPPCHECK_ARGS) -I include --max-configs=25 -q replace lib daemons tools clang: test -e $(CLANG_analyzer) scan-build $(CLANG_checkers:%=-enable-checker %) $(MAKE) $(AM_MAKEFLAGS) clean all # V3 = scandir unsetenv alphasort xalloc # V2 = setenv strerror strchrnul strndup # https://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import # previously, this was crypto/md5, but got spoiled with streams/kernel crypto GNU_MODS = crypto/md5-buffer # stdint appears to be surrogate only for C99-lacking environments GNU_MODS_AVOID = stdint # only for plain crypto/md5: we make do without kernel-assisted crypto # GNU_MODS_AVOID += crypto/af_alg gnulib-update: -test -e maint/gnulib \ || git clone https://git.savannah.gnu.org/git/gnulib.git maint/gnulib cd maint/gnulib && git pull maint/gnulib/gnulib-tool \ --source-base=lib/gnu --lgpl=2 --no-vc-files --no-conditional-dependencies \ $(GNU_MODS_AVOID:%=--avoid %) --import $(GNU_MODS) -# The toplevel "clean" targets are generated from Makefile.am, not this file. -# We can't use autotools' CLEANFILES, clean-local, etc. here. Instead, we -# define this target, which Makefile.am can call from clean-local. 
-ancillary-clean: export-clean spec-clean srpm-clean mock-clean coverity-clean +ancillary-clean: spec-clean srpm-clean mock-clean coverity-clean -rm -f $(TARFILE) diff --git a/Makefile.am b/Makefile.am index 149aceee47..3f0c5c8259 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,113 +1,125 @@ # # Copyright 2003-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # EXTRA_DIST = CONTRIBUTING.md \ GNUmakefile \ INSTALL.md \ README.markdown \ autogen.sh \ m4/gnulib-cache.m4 \ m4/gnulib-tool.m4 \ rpm/rpmlintrc \ rpm/pacemaker.spec.in DISTCLEANFILES = config.status MAINTAINERCLEANFILES = Makefile.in \ aclocal.m4 \ config.guess \ config.sub \ configure \ depcomp \ install-sh \ ltmain.sh \ missing \ py-compile \ test-driver # Disable building Publican documentation when doing "make distcheck", because # some of our book sources are in the source directory, while others are # dynamically generated in the build directory, and publican can't handle that. # # In a non-VPATH build, doc isn't entered with a plain "make" because the # GNUmakefile sets "core" as the default target. However in a VPATH build, # there is no GNUmakefile, so "all" becomes the default target. # +# Also, don't try to install files outside the build directory. +# # @TODO To support VPATH builds for Publican, we could use the same "copy all # static inputs into the build tree" trick that xml/Makefile.am uses for # static schema files. -AM_DISTCHECK_CONFIGURE_FLAGS = --with-brand="" +AM_DISTCHECK_CONFIGURE_FLAGS = --with-brand="" \ + --prefix="$$dc_install_base/usr" \ + --sysconfdir="$$dc_install_base/etc" \ + --with-initdir="$$dc_install_base/etc/init.d" \ + --with-ocfdir="$$dc_install_base/usr/lib/ocf" \ + --with-systemdsystemunitdir="$$dc_install_base$(systemdsystemunitdir)" # Only these will get installed with a plain "make install" CORE_INSTALL = replace include lib daemons tools xml # Only these will get built with a plain "make" or "make clean" CORE = $(CORE_INSTALL) cts SUBDIRS = $(CORE) doc extra maint AM_CPPFLAGS = -I$(top_srcdir)/include doc_DATA = README.markdown COPYING licensedir = $(docdir)/licenses/ dist_license_DATA = $(wildcard licenses/*) # Scratch file for ad-hoc testing -noinst_PROGRAMS = scratch +EXTRA_PROGRAMS = scratch nodist_scratch_SOURCES = scratch.c -scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la -lm - -scratch.c: - echo 'int main(void){}' >$@ +scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la core: @echo "Building only core components and tests: $(CORE)" @for subdir in $(CORE); do \ echo "Building $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir all || exit 1; \ done core-install: @echo "Installing only core components: $(CORE_INSTALL)" @for subdir in $(CORE_INSTALL); do \ echo "Installing $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir install || exit 1; \ done core-clean: @echo "Cleaning only core components and tests: $(CORE)" @for subdir in $(CORE); do \ echo "Cleaning $$subdir"; \ $(MAKE) $(AM_MAKEFLAGS) -C $$subdir clean || exit 1; \ done install-exec-local: $(INSTALL) -d -m 750 $(DESTDIR)/$(PACEMAKER_CONFIG_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CONFIG_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CORE_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_BLACKBOX_DIR) $(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_LOG_DIR) $(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_BUNDLE_DIR) -chgrp $(CRM_DAEMON_GROUP) 
$(DESTDIR)/$(PACEMAKER_CONFIG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CONFIG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CORE_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_BLACKBOX_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_LOG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_BUNDLE_DIR) # Use chown because the user/group may not exist clean-generic: -rm -f *.tar.bz2 *.sed -clean-local: - $(MAKE) $(AM_MAKEFLAGS) ancillary-clean +PACKAGE ?= pacemaker + +# In a normal build, this file is included by GNUmakefile, which serves as the +# "real" makefile. But in a VPATH build, GNUmakefile won't exist in the build +# tree, and this file will be the "real" makefile. EXTRA_CLEAN_TARGETS handles +# both cases: GNUmakefile defines it before including this file, so the +# clean-local target can clean up files created by GNUmakefile targets. +# If this file is used alone, the variable will be undefined. +clean-local: $(EXTRA_CLEAN_TARGETS) + -rm -f scratch $(builddir)/$(PACKAGE)-*.tar.gz distclean-local: -rm -rf libltdl autom4te.cache diff --git a/configure.ac b/configure.ac index aa2cd656bd..8b7278d48a 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1932 +1,2024 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2019 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) AC_CONFIG_MACRO_DIR([m4]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) -AC_ARG_WITH(version, - [ --with-version=version Override package version (if you are a packager needing to pretend) ], - [ PACKAGE_VERSION="$withval" ]) - -AC_ARG_WITH(pkg-name, - [ --with-pkg-name=name Override package name (if you are a packager needing to pretend) ], - [ PACKAGE_NAME="$withval" ]) - dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ -AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", - [Current pacemaker version]) - dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) -PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'` -AC_SUBST(PACKAGE_SERIES) -AC_SUBST(PACKAGE_VERSION) - CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AC_PROG_CC_STDC AC_PROG_CXX dnl C++ is not needed for build, just maintainer utilities gl_EARLY # We expressly don't want to introduce OpenSSL dependency by default. gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT LT_INIT([dlopen]) LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)], [RC=1; AC_MSG_RESULT(no)]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } dnl =============================================== dnl Configure Options dnl =============================================== -dnl Some systems, like Solaris require a custom package name -AC_ARG_WITH(pkgname, - [ --with-pkgname=name name for pkg (typically for Solaris) ], - [ PKGNAME="$withval" ], - [ PKGNAME="LXHAhb" ], - ) -AC_SUBST(PKGNAME) + +dnl --enable-* options AC_ARG_ENABLE([ansi], - [ --enable-ansi Force GCC to compile to ANSI standard for older compilers. @<:@no@:>@]) + [AS_HELP_STRING([--enable-ansi], + [force GCC to compile to ANSI standard for older compilers. 
@<:@no@:>@])], +) AC_ARG_ENABLE([fatal-warnings], - [ --enable-fatal-warnings Enable pedantic and fatal warnings for gcc @<:@yes@:>@]) + [AS_HELP_STRING([--enable-fatal-warnings], + [enable pedantic and fatal warnings for gcc @<:@yes@:>@])], +) AC_ARG_ENABLE([quiet], - [ --enable-quiet Suppress make output unless there is an error @<:@no@:>@]) + [AS_HELP_STRING([--enable-quiet], + [suppress make output unless there is an error @<:@no@:>@])], +) AC_ARG_ENABLE([no-stack], - [ --enable-no-stack Build only the scheduler and its requirements @<:@no@:>@]) + [AS_HELP_STRING([--enable-no-stack], + [build only the scheduler and its requirements @<:@no@:>@])], +) AC_ARG_ENABLE([upstart], - [ --enable-upstart Enable support for managing resources via Upstart @<:@try@:>@ ], + [AS_HELP_STRING([--enable-upstart], + [enable support for managing resources via Upstart @<:@try@:>@])], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], - [ --enable-systemd Enable support for managing resources via systemd @<:@try@:>@], + [AS_HELP_STRING([--enable-systemd], + [enable support for managing resources via systemd @<:@try@:>@])], [], [enable_systemd=try], ) -AC_ARG_ENABLE(hardening, - [ --enable-hardening Harden the resulting executables/libraries @<:@try@:>@], +AC_ARG_ENABLE([hardening], + [AS_HELP_STRING([--enable-hardening], + [harden the resulting executables/libraries @<:@try@:>@])], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) # By default, we add symlinks at the pre-2.0.0 daemon name locations, so that: # (1) tools that directly invoke those names for metadata etc. will still work # (2) this installation can be used in a bundle container image used with # cluster hosts running Pacemaker 1.1.17+ # If you know your target systems will not have any need for it, you can # disable this option. Once the above use cases are no longer in wide use, we # can disable this option by default, and once we no longer want to support # them at all, we can drop the option altogether. 
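dnl Purely as an illustrative sketch (daemon names taken from the old/new
dnl rename mappings used by the GNUmakefile changelog rules), such a
dnl compatibility link amounts to something like
dnl
dnl   ln -s pacemaker-controld ${CRM_DAEMON_DIR}/crmd
dnl
dnl so that tools that still invoke "crmd" for metadata keep working.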
-AC_ARG_ENABLE(legacy-links, - [ --enable-legacy-links Add symlinks for old daemon names @<:@yes@:>@], +AC_ARG_ENABLE([legacy-links], + [AS_HELP_STRING([--enable-legacy-links], + [add symlinks for old daemon names @<:@yes@:>@])], [ LEGACY_LINKS="${enableval}" ], [ LEGACY_LINKS=yes ], ) AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes") -AC_ARG_WITH(corosync, - [ --with-corosync Support the Corosync messaging and membership layer ], + +dnl --with-* options + +AC_DEFUN([VERSION_ARG], + [AC_ARG_WITH([version], + [AS_HELP_STRING([--with-version=VERSION], + [override package version @<:@$1@:>@])], + [ PACKAGE_VERSION="$withval" ])] +) +VERSION_ARG(VERSION_NUMBER) + +AC_ARG_WITH([corosync], + [AS_HELP_STRING([--with-corosync], + [support the Corosync messaging and membership layer])], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) -AC_ARG_WITH(nagios, - [ --with-nagios Support nagios remote monitoring ], +AC_ARG_WITH([nagios], + [AS_HELP_STRING([--with-nagios], + [support nagios remote monitoring])], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) -AC_ARG_WITH(nagios-plugin-dir, - [ --with-nagios-plugin-dir=DIR Directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@], +AC_ARG_WITH([nagios-plugin-dir], + [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], + [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) -AC_ARG_WITH(nagios-metadata-dir, - [ --with-nagios-metadata-dir=DIR Directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@], +AC_ARG_WITH([nagios-metadata-dir], + [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], + [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) -AC_ARG_WITH(acl, - [ --with-acl Support CIB ACL ], +AC_ARG_WITH([acl], + [AS_HELP_STRING([--with-acl], + [support CIB ACL])], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) -AC_ARG_WITH(cibsecrets, - [ --with-cibsecrets Support separate file for CIB secrets ], +AC_ARG_WITH([cibsecrets], + [AS_HELP_STRING([--with-cibsecrets], + [support separate file for CIB secrets])], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) PCMK_GNUTLS_PRIORITIES="NORMAL" -AC_ARG_WITH(gnutls-priorities, - [ --with-gnutls-priorities Default GnuTLS cipher priorities @<:@NORMAL@:>@ ], - [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ]) +AC_ARG_WITH([gnutls-priorities], + [AS_HELP_STRING([--with-gnutls-priorities], + [default GnuTLS cipher priorities @<:@NORMAL@:>@])], + [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] +) INITDIR="" -AC_ARG_WITH(initdir, - [ --with-initdir=DIR Directory for init (rc) scripts], - [ INITDIR="$withval" ]) +AC_ARG_WITH([initdir], + [AS_HELP_STRING([--with-initdir=DIR], + [directory for init (rc) scripts])], + [ INITDIR="$withval" ] +) + +systemdsystemunitdir="${systemdsystemunitdir-}" +AC_ARG_WITH([systemdsystemunitdir], + [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], + [directory for systemd unit files (advanced option: must match what systemd uses)])], + [ systemdsystemunitdir="$withval" ] +) SUPPORT_PROFILING=0 -AC_ARG_WITH(profiling, - [ --with-profiling Disable optimizations for effective profiling ], - [ SUPPORT_PROFILING=$withval ]) +AC_ARG_WITH([profiling], + [AS_HELP_STRING([--with-profiling], + [disable optimizations for effective profiling])], + [ SUPPORT_PROFILING=$withval ] +) -AC_ARG_WITH(coverage, - [ --with-coverage Disable optimizations for effective profiling ], - [ 
SUPPORT_COVERAGE=$withval ]) +AC_ARG_WITH([coverage], + [AS_HELP_STRING([--with-coverage], + [disable optimizations for effective profiling])], + [ SUPPORT_COVERAGE=$withval ] +) PUBLICAN_BRAND="common" -AC_ARG_WITH(brand, - [ --with-brand=brand Brand to use for generated documentation (set empty for no docs) @<:@common@:>@], - [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ]) +AC_ARG_WITH([brand], + [AS_HELP_STRING([--with-brand=brand], + [brand to use for generated documentation (set empty for no docs) @<:@common@:>@])], + [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ] +) AC_SUBST(PUBLICAN_BRAND) BUG_URL="" -AC_ARG_WITH(bug-url, - [ --with-bug-url=DIR Address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@], +AC_ARG_WITH([bug-url], + [AS_HELP_STRING([--with-bug-url=DIR], + [address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@])], [ BUG_URL="$withval" ] ) CONFIGDIR="" -AC_ARG_WITH(configdir, - [ --with-configdir=DIR Directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@], +AC_ARG_WITH([configdir], + [AS_HELP_STRING([--with-configdir=DIR], + [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) CRM_LOG_DIR="" -AC_ARG_WITH(logdir, - [ --with-logdir=DIR Directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@ ], +AC_ARG_WITH([logdir], + [AS_HELP_STRING([--with-logdir=DIR], + [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" -AC_ARG_WITH(bundledir, - [ --with-bundledir=DIR Directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@ ], +AC_ARG_WITH([bundledir], + [AS_HELP_STRING([--with-bundledir=DIR], + [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) +dnl The not-yet-released autoconf 2.70 will have a --runstatedir option. +dnl Until that's available, emulate it with our own --with-runstatedir. +pcmk_runstatedir="" +AC_ARG_WITH([runstatedir], + [AS_HELP_STRING([--with-runstatedir=DIR], + [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], + [ pcmk_runstatedir="$withval" ] +) + +dnl This defaults to /usr/lib rather than libdir because it's determined by the +dnl OCF project and not pacemaker. Even if a user wants to install pacemaker to +dnl /usr/local or such, the OCF agents will be expected in their usual +dnl location. However, we do give the user the option to override it. 
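dnl (Editorial illustration, not part of the change itself: with the default
dnl root, an agent shipped by the resource-agents package would typically be
dnl found at a path like /usr/lib/ocf/resource.d/heartbeat/IPaddr2.)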
+OCF_ROOT_DIR="/usr/lib/ocf" +AC_ARG_WITH([ocfdir], + [AS_HELP_STRING([--with-ocfdir=DIR], + [OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@/usr/lib/ocf@:>@])], + [ OCF_ROOT_DIR="$withval" ] +) +AC_SUBST(OCF_ROOT_DIR) + +CRM_DAEMON_USER="" +AC_ARG_WITH([daemon-user], + [AS_HELP_STRING([--with-daemon-user=USER], + [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], + [ CRM_DAEMON_USER="$withval" ] +) + +CRM_DAEMON_GROUP="" +AC_ARG_WITH([daemon-group], + [AS_HELP_STRING([--with-daemon-group=GROUP], + [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], + [ CRM_DAEMON_GROUP="$withval" ] +) + + +dnl Deprecated options + +AC_ARG_WITH([pkg-name], + [AS_HELP_STRING([--with-pkg-name=name], + [deprecated and unused (will be removed in a future release)])], +) + +AC_ARG_WITH([pkgname], + [AS_HELP_STRING([--with-pkgname=name], + [deprecated and unused (will be removed in a future release)])], +) + + dnl =============================================== dnl General Processing dnl =============================================== +AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", + [Current pacemaker version]) + +PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'` +AC_SUBST(PACKAGE_SERIES) +AC_SUBST(PACKAGE_VERSION) + AC_PROG_LN_S AC_PROG_MKDIR_P if cc_supports_flag -Werror; then WERROR="-Werror" else WERROR="" fi # Normalize enable_fatal_warnings (defaulting to yes, when compiler supports it) if test "x${enable_fatal_warnings}" != "xno" ; then if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then enable_fatal_warnings=yes else AC_MSG_NOTICE(Compiler does not support fatal warnings) enable_fatal_warnings=no fi fi INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
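dnl A small example of what the evals below achieve (values are illustrative):
dnl with prefix=/usr and the autoconf default bindir='${exec_prefix}/bin',
dnl
dnl   eval bindir="`eval echo ${bindir}`"
dnl
dnl leaves bindir=/usr/bin, so #defines and Python scripts get a real path
dnl rather than the literal string '${exec_prefix}/bin'.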
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables + +if [ test "x${runstatedir}" = "x" ]; then + if [ test "x${pcmk_runstatedir}" = "x" ]; then + runstatedir="${localstatedir}/run" + else + runstatedir="${pcmk_runstatedir}" + fi +fi +eval runstatedir="$(eval echo ${runstatedir})" +AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], + [Location for modifiable per-process data]) +AC_SUBST(runstatedir) + eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) if test x"${CRM_LOG_DIR}" = x""; then CRM_LOG_DIR="${localstatedir}/log/pacemaker" fi AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) if test x"${CRM_BUNDLE_DIR}" = x""; then CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles" fi AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) if test x"${PCMK_GNUTLS_PRIORITIES}" = x""; then AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities]) fi AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) if test x"${BUG_URL}" = x""; then BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker" fi AC_SUBST(BUG_URL) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include ]]) ], [[#define _GNU_SOURCE #include ]]) ], [], [[#include ]]) ]) if test -z "${us_auth}"; then # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([US_AUTH_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([US_AUTH_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ AC_MSG_ERROR([No way to authenticate a Unix socket peer]) ], [[#include ]]) ]) fi dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac # C99 doesn't guarantee uint64_t type and related format specifiers, but # prerequisites, corosync + libqb, use that widely, so the target platforms # are already pre-constrained to those "64bit-clean" (doesn't imply native # bit width) and hence we deliberately refrain from artificial surrogates # (sans manipulation through cached values). AC_CACHE_VAL( [pcmk_cv_decl_inttypes], [ AC_CHECK_DECLS( [PRIu64, PRIu32, PRIx32, SCNu64], [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"], [ # test shall only react on "no" cached result & error out respectively if test "x$ac_cv_have_decl_PRIu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)]) elif test "x$ac_cv_have_decl_PRIu32" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)]) elif test "x$ac_cv_have_decl_PRIx32" = xno; then AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)]) elif test "x$ac_cv_have_decl_SCNu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)]) fi ], [[#include ]] ) ] ) ( set $pcmk_cv_decl_inttypes AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T]) AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T]) AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T]) AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS]) ) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. 
To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path AC_PATH_PROG([PYTHON], [$PYTHON]) case "x$PYTHON" in x*python3*|x*platform-python*) dnl When used with Python 3, Pacemaker requires a minimum of 3.2 AM_PATH_PYTHON([3.2]) ;; *) dnl Otherwise, Pacemaker requires a minimum of 2.7 AM_PATH_PYTHON([2.7]) ;; esac AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([PUBLICAN], [publican]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl BASH is already an environment variable, so use something else AC_PATH_PROG([BASH_PATH], [bash]) PKG_PROG_PKG_CONFIG # PKG_NOARCH_INSTALLDIR not available prior to pkg-config 0.27 and # pkgconf 0.8.10, respectively (next line is to mimic that scenario) dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) m4_ifndef([PKG_NOARCH_INSTALLDIR], [ AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) ]) ]) PKG_NOARCH_INSTALLDIR AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi dnl Bash is needed for building man pages and running regression tests if test x"${BASH_PATH}" = x""; then AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi publican_intree_brand=no if test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""; then dnl special handling for clusterlabs brand (possibly in-tree version used) test "${PUBLICAN_BRAND}" != "clusterlabs" \ || test -d /usr/share/publican/Common_Content/clusterlabs if test $? -ne 0; then dnl Unknown option: brand_dir vs. 
Option brand_dir requires an argument if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common]) PUBLICAN_BRAND=common else publican_intree_brand=yes fi fi AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand]) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi AM_CONDITIONAL([BUILD_DOCBOOK], [test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""]) AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], [test x"${publican_intree_brand}" = x"yes"]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH; do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then $GETOPT_PATH -T >/dev/null 2>/dev/null if test $? -eq 4; then break fi fi GETOPT_PATH="" done IFS=$IFS_orig if test -n "$GETOPT_PATH"; then AC_MSG_RESULT([$GETOPT_PATH]) else AC_MSG_RESULT([no]) AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt) fi AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolearably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolearably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_FUNCS([sched_setscheduler]) if test "$ac_cv_func_sched_setscheduler" != yes; then PC_LIBS_RT="" else PC_LIBS_RT="-lrt" fi AC_SUBST(PC_LIBS_RT) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi if test x"${PKG_CONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi # Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc. 
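# (Informal equivalent of the check below, assuming pkg-config is available:
# "pkg-config --exists 'glib-2.0 >= 2.16.0'" succeeding is roughly what
# PKG_CHECK_MODULES verifies before GLIB_CFLAGS/GLIB_LIBS are appended to
# CPPFLAGS/LIBS.)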
PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl FreeBSD needs -lcompat for ftime() used by lrmd.c AC_CHECK_LIB([compat], [ftime], [COMPAT_LIBS='-lcompat']) AC_SUBST(COMPAT_LIBS) dnl ======================================================================== dnl Headers dnl ======================================================================== dnl Some distributions insert #warnings into deprecated headers such as dnl timeb.h. If we will enable fatal warnings for the build, then enable dnl them for the header checks as well, otherwise the build could fail dnl even though the header check succeeds. (We should probably be doing dnl this in more places.) if test "x${enable_fatal_warnings}" = xyes ; then cc_temp_flags "$CFLAGS $WERROR" fi AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/swab.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/reboot.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/signalfd.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/timeb.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) if test "x${enable_fatal_warnings}" = xyes ; then cc_restore_flags fi dnl These headers need prerequisites before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) AC_CHECK_HEADERS(libxml/xpath.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(libxml development headers not found) fi AC_CHECK_LIB(xslt, xsltApplyStylesheet, [], AC_MSG_ERROR(Unsupported libxslt library version)) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(libxslt development headers not found) fi AC_CACHE_CHECK(whether __progname and __progname_full are available, pf_cv_var_progname, AC_TRY_LINK([extern char *__progname, *__progname_full;], [__progname = "foo"; __progname_full = "foo bar";], pf_cv_var_progname="yes", pf_cv_var_progname="no")) if test "$pf_cv_var_progname" = "yes"; then AC_DEFINE(HAVE___PROGNAME,1,[ ]) fi dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include ]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include ]) dnl ======================================================================== 
dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) AC_CACHE_CHECK(whether sscanf supports %m, pf_cv_var_sscanf, AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include const char *s = "some-command-line-arg"; int main(int argc, char **argv) { char *name = NULL; int n = sscanf(s, "%ms", &name); return n == 1 ? 0 : 1; } ]])], pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) if test "$pf_cv_var_sscanf" = "yes"; then AC_DEFINE(SSCANF_HAS_M, 1, [ ]) fi dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_TRY_COMPILE([#include ],[sighandler_t *f;], has_sighandler_t=yes,has_sighandler_t=no) AC_MSG_RESULT($has_sighandler_t) if test "$has_sighandler_t" = "yes" ; then AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurse takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" cc_temp_flags "-Wcast-qual $WERROR" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include #elif defined(HAVE_NCURSES_NCURSES_H) # include #elif defined(HAVE_CURSES_H) # include #endif ], [printw((const char *)"Test");] )], [pcmk_cv_compatible_printw=yes], [pcmk_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS cc_restore_flags AC_MSG_RESULT([$pcmk_cv_compatible_printw]) if test "$pcmk_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== if test x${enable_no_stack} = xyes; then SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 0.14.0+ (2012-06) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" dnl libqb 0.17.0+ (2014-02) AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) dnl libqb not yet released (as of 2018-05) 
CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS(stonith/stonith.h) if test "$ac_cv_header_stonith_stonith_h" = "yes"; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) -CRM_DAEMON_USER="hacluster" +if test x"${CRM_DAEMON_USER}" = x""; then + CRM_DAEMON_USER="hacluster" +fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) -CRM_DAEMON_GROUP="haclient" +if test x"${CRM_DAEMON_GROUP}" = x""; then + CRM_DAEMON_GROUP="haclient" +fi AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) -CRM_STATE_DIR=${localstatedir}/run/crm -AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets) -AC_SUBST(CRM_STATE_DIR) - CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) -HA_STATE_DIR="${localstatedir}/run" -AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where sbd keeps its PID file) -AC_SUBST(HA_STATE_DIR) +CRM_STATE_DIR="${runstatedir}/crm" +AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], + [Where to keep state files and sockets]) +AC_SUBST(CRM_STATE_DIR) -CRM_RSCTMP_DIR="${localstatedir}/run/resource-agents" +CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like 
authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) -OCF_ROOT_DIR="/usr/lib/ocf" -if test "X$OCF_ROOT_DIR" = X; then - AC_MSG_ERROR(Could not locate OCF directory) -fi -AC_SUBST(OCF_ROOT_DIR) - OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) RH_STONITH_DIR="$sbindir" AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries) RH_STONITH_PREFIX="fence_" AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"], [HAVE_dbus=0]) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) AC_CHECK_TYPES([DBusBasicValue],,,[[#include ]]) if test $HAVE_dbus = 0; then PC_NAME_DBUS="" else PC_NAME_DBUS="dbus-1" fi AC_SUBST(PC_NAME_DBUS) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without DBus]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then AC_MSG_CHECKING([for systemd version query result via dbus-send]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) HAVE_systemd=0 if test "x${enable_systemd}" = xyes; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" - AC_MSG_CHECKING([for systemd path for system unit files]) - systemdunitdir="${systemdunitdir-}" - PKG_CHECK_VAR([systemdunitdir], [systemd], - [systemdsystemunitdir], [], [systemdunitdir=no]) - AC_MSG_RESULT([${systemdunitdir}]) - if test "x${systemdunitdir}" = xno; then - AC_MSG_FAILURE([cannot enable systemd when systemdunitdir unresolved]) + AC_MSG_CHECKING([which system unit file directory to use]) + PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) + AC_MSG_RESULT([${systemdsystemunitdir}]) + if test "x${systemdsystemunitdir}" = x""; then + AC_MSG_FAILURE([cannot enable systemd when systemdsystemunitdir unresolved]) fi fi -AC_SUBST(systemdunitdir) +AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, 
test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then AC_MSG_FAILURE([cannot enable Upstart without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then AC_MSG_CHECKING([for Upstart version query result via dbus-send]) ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) HAVE_upstart=0 if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) AC_SUBST(SUPPORT_UPSTART) case $SUPPORT_NAGIOS in 1|yes|true|try) SUPPORT_NAGIOS=1 ;; *) SUPPORT_NAGIOS=0 ;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_CS=no ;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS) SUPPORT_CS=1 PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-native" fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AC_SUBST(SUPPORT_COROSYNC) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported, building only the scheduler) 
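dnl (Illustrative note; the option name is inferred from the enable_no_stack
dnl variable tested above and should be treated as an assumption.) A
dnl scheduler-only build with no cluster stack at all would be requested
dnl roughly as:
dnl   ./configure --enable-no-stack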
PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_ACL=no ;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) SUPPORT_ACL=1 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1 ;; *) SUPPORT_CIBSECRETS=0 ;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) if test $SUPPORT_CIBSECRETS = 1; then PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) fi dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29) AC_CHECK_LIB(gnutls, gnutls_priority_set_direct) if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_FUNCS([gnutls_sec_param_to_pk_bits]) dnl since 2.12.0 (2011-03-24) if test "$ac_cv_header_gnutls_gnutls_h" != "yes"; then PC_NAME_GNUTLS="" else PC_NAME_GNUTLS="gnutls" fi AC_SUBST(PC_NAME_GNUTLS) fi dnl ======================================================================== dnl PAM dnl ======================================================================== AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIMPI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG 
then PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) OPENIPMI_SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) CC_EXTRAS="" if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" else CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl what makes the things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise none of both gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags if cc_supports_flag "-Wformat-nonliteral"; then gcc_format_nonliteral=yes else gcc_format_nonliteral=no fi # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat). 
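# A sketch (assumed usage, not copied from any particular source file) of the
# local exemption that the diagnostic push/pull support detected above makes
# possible: when GCC_FORMAT_NONLITERAL_CHECKING_ENABLED is defined, a
# deliberately nonliteral format string can be tolerated in one spot instead of
# dropping -Wformat-nonliteral globally, roughly:
#   #pragma GCC diagnostic push
#   #pragma GCC diagnostic ignored "-Wformat-nonliteral"
#   vsnprintf(buf, sizeof(buf), fmt, ap);  /* fmt is intentionally nonliteral */
#   #pragma GCC diagnostic pop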
EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char" if test "x$gcc_diagnostic_push_pull" = "xyes"; then AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" else if test "x$gcc_format_nonliteral" = "xyes"; then EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" fi fi # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $CC_EXTRAS $j then CC_EXTRAS="$CC_EXTRAS $j" fi done if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi if test "x${HARDENING}" = "xno"; then AC_MSG_NOTICE([Hardening: explicitly disabled]) elif test "x${HARDENING}" = "xyes" \ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then dnl We'll figure out on our own... CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. 
libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1]) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1]) fi # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1]) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS $WERROR" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... 
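dnl For illustration only (the path assumes a default libexecdir): once a build
dnl made with the hardening flags above is installed, their effect can be
dnl spot-checked with binutils, e.g. for one of the daemons:
dnl   readelf -l /usr/libexec/pacemaker/pacemaker-execd | grep GNU_RELRO  # RELRO segment present
dnl   readelf -d /usr/libexec/pacemaker/pacemaker-execd | grep BIND_NOW   # full RELRO ("bind now")
dnl   readelf -h /usr/libexec/pacemaker/pacemaker-execd | grep 'Type:'    # "DYN" indicates PIE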
AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant fi AC_MSG_RESULT(Suppress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py]) AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy]) AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py]) AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test]) AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts]) AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli]) AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage]) AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec]) AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing]) AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher]) AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression]) AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler]) AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support]) AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh]) AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench]) AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) AC_CONFIG_FILES([extra/resources/SysInfo], [chmod +x extra/resources/SysInfo]) AC_CONFIG_FILES([extra/resources/ifspeed], [chmod +x extra/resources/ifspeed]) AC_CONFIG_FILES([extra/resources/o2cb], [chmod +x extra/resources/o2cb]) AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount]) AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master]) AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report]) AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby]) AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/Clusters_from_Scratch/publican.cfg \ doc/Pacemaker_Administration/publican.cfg \ doc/Pacemaker_Development/publican.cfg \ doc/Pacemaker_Explained/publican.cfg \ doc/Pacemaker_Remote/publican.cfg \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ 
extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_RESULT([ Unix socket auth method = ${us_auth}]) diff --git a/cts/Makefile.am b/cts/Makefile.am index a0541e56c2..a6bc3be9c4 100644 --- a/cts/Makefile.am +++ b/cts/Makefile.am @@ -1,102 +1,104 @@ # # Copyright 2001-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # MAINTAINERCLEANFILES = Makefile.in noinst_SCRIPTS = cluster_test \ OCFIPraTest.py # Commands intended to be run only via other commands halibdir = $(CRM_DAEMON_DIR) dist_halib_SCRIPTS = cts-log-watcher \ cts-support # Test commands and globally applicable test files should be in $(testdir), # and command-specific test data should be in a command-specific subdirectory. 
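# For example (illustrative; the path assumes a default prefix and the exact
# cts-scheduler arguments may differ), a single scheduler regression can be run
# from the installed tree roughly as:
#   /usr/share/pacemaker/tests/cts-scheduler --run bug-rh-1097457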
testdir = $(datadir)/$(PACKAGE)/tests test_SCRIPTS = cts-cli \ cts-coverage \ cts-exec \ cts-fencing \ cts-regression \ cts-scheduler dist_test_DATA = README.md \ valgrind-pcmk.suppressions ctslibdir = $(pythondir)/cts ctslib_PYTHON = __init__.py \ CIB.py \ cib_xml.py \ CM_common.py \ CM_corosync.py \ CTSaudits.py \ CTSscenarios.py \ CTStests.py \ environment.py \ logging.py \ patterns.py \ remote.py \ watcher.py nodist_ctslib_PYTHON = CTS.py \ CTSvars.py ctsdir = $(testdir)/cts cts_DATA = pacemaker-cts-dummyd@.service if BUILD_UPSTART dist_cts_DATA = pacemaker-cts-dummyd.conf endif cts_SCRIPTS = CTSlab.py \ cts \ fence_dummy \ LSBDummy \ lxc_autogen.sh \ pacemaker-cts-dummyd clidir = $(testdir)/cli dist_cli_DATA = cli/crm_diff_new.xml \ cli/crm_diff_old.xml \ cli/regression.acls.exp \ cli/regression.dates.exp \ cli/regression.rules.exp \ cli/regression.tools.exp \ cli/regression.upgrade.exp \ cli/regression.validity.exp PE_TESTS = $(wildcard scheduler/*.scores) pedir = $(testdir)/scheduler dist_pe_DATA = $(PE_TESTS) \ $(PE_TESTS:%.scores=%.xml) \ $(PE_TESTS:%.scores=%.exp) \ $(PE_TESTS:%.scores=%.dot) \ $(PE_TESTS:%.scores=%.summary) \ $(wildcard scheduler/*.stderr) # For "make check", run a single scheduler test TESTS = scheduler/bug-rh-1097457.xml TEST_EXTENSIONS = .xml XML_LOG_COMPILER = ./cts-scheduler AM_XML_LOG_FLAGS = --io-dir="$(srcdir)/scheduler" \ --out-dir="$(builddir)" \ -V --run scheduler-list: @for T in "$(srcdir)"/scheduler/*.xml; do \ echo $$(basename $$T .xml); \ done +CLEANFILES = $(builddir)/.regression.failed.diff + clean-local: rm -f scheduler/*.pe.* SUBDIRS = benchmark cts-support-install: cts-support ./cts-support install cts-support-uninstall: cts-support ./cts-support uninstall diff --git a/cts/cts-support.in b/cts/cts-support.in index 599b371be1..8a0822fa0e 100644 --- a/cts/cts-support.in +++ b/cts/cts-support.in @@ -1,149 +1,149 @@ #!/bin/sh # # Installer for support files needed by Pacemaker's Cluster Test Suite # -# Copyright 2018 the Pacemaker project contributors +# Copyright 2018-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # USAGE_TEXT="Usage: $0 " HELP_TEXT="$USAGE_TEXT Commands (must be run as root): install Install support files needed by Pacemaker CTS uninstall Remove support files needed by Pacemaker CTS" # These constants must track crm_exit_t values CRM_EX_OK=0 CRM_EX_ERROR=1 CRM_EX_USAGE=64 -UNIT_DIR="@systemdunitdir@" +UNIT_DIR="@systemdsystemunitdir@" LIBEXEC_DIR="@libexecdir@/pacemaker" INIT_DIR="@INITDIR@" SBIN_DIR="@sbindir@" DATA_DIR="@datadir@/pacemaker/tests/cts" UPSTART_DIR="/etc/init" DUMMY_DAEMON="pacemaker-cts-dummyd" DUMMY_DAEMON_UNIT="pacemaker-cts-dummyd@.service" LSB_DUMMY="LSBDummy" UPSTART_DUMMY="pacemaker-cts-dummyd.conf" FENCE_DUMMY="fence_dummy" FENCE_DUMMY_ALIASES="fence_dummy_auto_unfence fence_dummy_no_reboot" # If the install directory doesn't exist, assume we're in a build directory. if [ ! -d "$DATA_DIR" ]; then # If readlink supports -e (i.e. GNU), use it. readlink -e / >/dev/null 2>/dev/null if [ $? -eq 0 ]; then DATA_DIR="$(dirname "$(readlink -e "$0")")" else DATA_DIR="$(dirname "$0")" fi fi usage() { echo "Error:" "$@" echo "$USAGE_TEXT" exit $CRM_EX_USAGE } must_be_root() { if ! 
[ "$(id -u)" = "0" ]; then usage "this command must be run as root" return $CRM_EX_ERROR fi return $CRM_EX_OK } support_uninstall() { must_be_root || return $CRM_EX_ERROR if [ -e "$UNIT_DIR/$DUMMY_DAEMON_UNIT" ]; then echo "Removing $UNIT_DIR/$DUMMY_DAEMON_UNIT ..." rm -f "$UNIT_DIR/$DUMMY_DAEMON_UNIT" systemctl daemon-reload # Ignore failure fi for FILE in \ "$LIBEXEC_DIR/$DUMMY_DAEMON" \ "$UPSTART_DIR/$UPSTART_DUMMY" \ "$SBIN_DIR/$FENCE_DUMMY" \ "$INIT_DIR/$LSB_DUMMY" do if [ -e "$FILE" ]; then echo "Removing $FILE ..." rm -f "$FILE" fi done for ALIAS in $FENCE_DUMMY_ALIASES; do \ FILE="$SBIN_DIR/$ALIAS" if [ -L "$FILE" ] || [ -e "$FILE" ]; then echo "Removing $FILE ..." rm -f "$FILE" fi done return $CRM_EX_OK } support_install() { support_uninstall || return $CRM_EX_ERROR cd "$DATA_DIR" || return $CRM_EX_ERROR if [ -d "$UNIT_DIR" ]; then echo "Installing $DUMMY_DAEMON ..." mkdir -p "$LIBEXEC_DIR" install -m 0755 "$DUMMY_DAEMON" "$LIBEXEC_DIR" || return $CRM_EX_ERROR echo "Installing $DUMMY_DAEMON_UNIT ..." install -m 0644 "$DUMMY_DAEMON_UNIT" "$UNIT_DIR" || return $CRM_EX_ERROR systemctl daemon-reload # Ignore failure fi echo "Installing $FENCE_DUMMY to $SBIN_DIR ..." mkdir -p "$SBIN_DIR" install -m 0755 "$FENCE_DUMMY" "$SBIN_DIR" || return $CRM_EX_ERROR for alias in $FENCE_DUMMY_ALIASES; do \ echo "Installing $alias to $SBIN_DIR ..." ln -s "$FENCE_DUMMY" "$SBIN_DIR/$alias" || return $CRM_EX_ERROR done echo "Installing $LSB_DUMMY to $INIT_DIR ..." mkdir -p "$INIT_DIR" install -m 0755 "$LSB_DUMMY" "$INIT_DIR" || return $CRM_EX_ERROR if [ -d "$UPSTART_DIR" ] && [ -f "$UPSTART_DUMMY" ]; then echo "Installing $UPSTART_DUMMY to $UPSTART_DIR ..." install -m 0644 "$UPSTART_DUMMY" "$UPSTART_DIR" || return $CRM_EX_ERROR fi return $CRM_EX_OK } COMMAND="" while [ $# -gt 0 ] ; do case "$1" in --help) echo "$HELP_TEXT" exit $CRM_EX_OK ;; install|uninstall) COMMAND="$1" shift ;; *) usage "unknown option '$1'" ;; esac done case "$COMMAND" in install) support_install ;; uninstall) support_uninstall ;; *) usage "must specify command" ;; esac diff --git a/daemons/execd/Makefile.am b/daemons/execd/Makefile.am index 3db78af9ba..048f7f3b44 100644 --- a/daemons/execd/Makefile.am +++ b/daemons/execd/Makefile.am @@ -1,62 +1,62 @@ # # Copyright 2012-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU Lesser General Public License # version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
# include $(top_srcdir)/Makefile.common halibdir = $(CRM_DAEMON_DIR) halib_PROGRAMS = pacemaker-execd cts-exec-helper initdir = $(INITDIR) init_SCRIPTS = pacemaker_remote sbin_PROGRAMS = pacemaker-remoted if BUILD_SYSTEMD -systemdunit_DATA = pacemaker_remote.service +systemdsystemunit_DATA = pacemaker_remote.service endif pacemaker_execd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_execd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_execd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/fencing/libstonithd.la ${COMPAT_LIBS} pacemaker_execd_SOURCES = pacemaker-execd.c execd_commands.c \ execd_alerts.c pacemaker_remoted_CPPFLAGS = -DSUPPORT_REMOTE $(AM_CPPFLAGS) pacemaker_remoted_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemaker_remoted_LDADD = $(pacemaker_execd_LDADD) \ $(top_builddir)/lib/lrmd/liblrmd.la pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \ remoted_tls.c remoted_proxy.c cts_exec_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/lrmd/liblrmd.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/pengine/libpe_status.la cts_exec_helper_SOURCES = cts-exec-helper.c noinst_HEADERS = pacemaker-execd.h CLEANFILES = $(man8_MANS) if BUILD_LEGACY_LINKS install-exec-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd && $(LN_S) pacemaker-execd lrmd cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted && $(LN_S) pacemaker-remoted pacemaker_remoted uninstall-hook: cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted endif diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am index ae37ab4481..ade160f11d 100644 --- a/daemons/pacemakerd/Makefile.am +++ b/daemons/pacemakerd/Makefile.am @@ -1,37 +1,37 @@ # -# Copyright 2004-2018 the Pacemaker project contributors +# Copyright 2004-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/Makefile.common if BUILD_CS_SUPPORT initdir = $(INITDIR) init_SCRIPTS = pacemaker sbin_PROGRAMS = pacemakerd if BUILD_SYSTEMD -systemdunit_DATA = pacemaker.service +systemdsystemunit_DATA = pacemaker.service endif EXTRA_DIST = pacemaker.sysconfig ## SOURCES noinst_HEADERS = pacemakerd.h pacemakerd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la pacemakerd_LDADD += $(CLUSTERLIBS) pacemakerd_SOURCES = pacemakerd.c pcmkd_corosync.c endif CLEANFILES = $(man8_MANS) diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c index b4e2b89c68..fdc1d9f52e 100644 --- a/daemons/pacemakerd/pacemakerd.c +++ b/daemons/pacemakerd/pacemakerd.c @@ -1,1420 +1,1420 @@ /* * Copyright 2010-2019 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include "pacemakerd.h" #include #include #include #include #include #include #include #include #include /* indirectly: CRM_EX_* */ #include /* cib_channel_ro */ #include #include #include #include #include #include /* PCMK__SPECIAL_PID*, ... 
*/ #ifdef SUPPORT_COROSYNC #include #endif #include #include static gboolean pcmk_quorate = FALSE; static gboolean fatal_error = FALSE; static GMainLoop *mainloop = NULL; static bool global_keep_tracking = false; #define PCMK_PROCESS_CHECK_INTERVAL 5 static const char *local_name = NULL; static uint32_t local_nodeid = 0; static crm_trigger_t *shutdown_trigger = NULL; -static const char *pid_file = "/var/run/pacemaker.pid"; +static const char *pid_file = PCMK_RUN_DIR "/pacemaker.pid"; typedef struct pcmk_child_s { int pid; long flag; int start_seq; int respawn_count; gboolean respawn; const char *name; const char *uid; const char *command; const char *endpoint; /* IPC server name */ gboolean active_before_startup; } pcmk_child_t; /* Index into the array below */ #define PCMK_CHILD_CONTROLD 3 static pcmk_child_t pcmk_children[] = { { 0, crm_proc_none, 0, 0, FALSE, "none", NULL, NULL }, { 0, crm_proc_execd, 3, 0, TRUE, "pacemaker-execd", NULL, CRM_DAEMON_DIR "/pacemaker-execd", CRM_SYSTEM_LRMD }, { 0, crm_proc_based, 1, 0, TRUE, "pacemaker-based", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-based", CIB_CHANNEL_RO }, { 0, crm_proc_controld, 6, 0, TRUE, "pacemaker-controld", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-controld", CRM_SYSTEM_CRMD }, { 0, crm_proc_attrd, 4, 0, TRUE, "pacemaker-attrd", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-attrd", T_ATTRD }, { 0, crm_proc_schedulerd, 5, 0, TRUE, "pacemaker-schedulerd", CRM_DAEMON_USER, CRM_DAEMON_DIR "/pacemaker-schedulerd", CRM_SYSTEM_PENGINE }, { 0, crm_proc_fenced, 2, 0, TRUE, "pacemaker-fenced", NULL, CRM_DAEMON_DIR "/pacemaker-fenced", "stonith-ng" }, }; static gboolean check_active_before_startup_processes(gpointer user_data); static int pcmk_child_active(pcmk_child_t *child); static gboolean start_child(pcmk_child_t * child); static gboolean update_node_processes(uint32_t id, const char *uname, uint32_t procs); void update_process_clients(crm_client_t *client); static uint32_t get_process_list(void) { int lpc = 0; uint32_t procs = crm_get_cluster_proc(); for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) { if (pcmk_children[lpc].pid != 0) { procs |= pcmk_children[lpc].flag; } } return procs; } static void pcmk_process_exit(pcmk_child_t * child) { child->pid = 0; child->active_before_startup = FALSE; /* Broadcast the fact that one of our processes died ASAP * * Try to get some logging of the cause out first though * because we're probably about to get fenced * * Potentially do this only if respawn_count > N * to allow for local recovery */ update_node_processes(local_nodeid, NULL, get_process_list()); child->respawn_count += 1; if (child->respawn_count > MAX_RESPAWN) { crm_err("Child respawn count exceeded by %s", child->name); child->respawn = FALSE; } if (shutdown_trigger) { /* resume step-wise shutdown (returned TRUE yields no parallelizing) */ mainloop_set_trigger(shutdown_trigger); /* intended to speed up propagating expected lay-off of the daemons? 
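   (update_node_processes() broadcasts a changed local process list to IPC
   clients and CPG peers via update_process_clients() / update_process_peers())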
*/ update_node_processes(local_nodeid, NULL, get_process_list()); } else if (!child->respawn) { /* nothing to do */ } else if (crm_is_true(getenv("PCMK_fail_fast"))) { crm_err("Rebooting system because of %s", child->name); pcmk_panic(__FUNCTION__); } else if (pcmk_child_active(child) == 1) { crm_warn("One-off suppressing strict respawning of a child process %s," " appears alright per %s IPC end-point", child->name, child->endpoint); /* need to monitor how it evolves, and start new process if badly */ child->active_before_startup = TRUE; if (!global_keep_tracking) { global_keep_tracking = true; g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL, check_active_before_startup_processes, NULL); } } else { crm_notice("Respawning failed child process: %s", child->name); start_child(child); } } static void pcmk_exit_with_cluster(int exitcode) { #ifdef SUPPORT_COROSYNC corosync_cfg_handle_t cfg_handle; cs_error_t err; if (exitcode == CRM_EX_FATAL) { crm_info("Asking Corosync to shut down"); err = corosync_cfg_initialize(&cfg_handle, NULL); if (err != CS_OK) { crm_warn("Unable to open handle to corosync to close it down. err=%d", err); } err = corosync_cfg_try_shutdown(cfg_handle, COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE); if (err != CS_OK) { crm_warn("Corosync shutdown failed. err=%d", err); } corosync_cfg_finalize(cfg_handle); } #endif crm_exit(exitcode); } static void pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { pcmk_child_t *child = mainloop_child_userdata(p); const char *name = mainloop_child_name(p); if (signo) { do_crm_log(((signo == SIGKILL)? LOG_WARNING : LOG_ERR), "%s[%d] terminated with signal %d (core=%d)", name, pid, signo, core); } else { switch(exitcode) { case CRM_EX_OK: crm_info("%s[%d] exited with status %d (%s)", name, pid, exitcode, crm_exit_str(exitcode)); break; case CRM_EX_FATAL: crm_warn("Shutting cluster down because %s[%d] had fatal failure", name, pid); child->respawn = FALSE; fatal_error = TRUE; pcmk_shutdown(SIGTERM); break; case CRM_EX_PANIC: do_crm_log_always(LOG_EMERG, "%s[%d] instructed the machine to reset", name, pid); child->respawn = FALSE; fatal_error = TRUE; pcmk_panic(__FUNCTION__); pcmk_shutdown(SIGTERM); break; default: crm_err("%s[%d] exited with status %d (%s)", name, pid, exitcode, crm_exit_str(exitcode)); break; } } pcmk_process_exit(child); } static gboolean stop_child(pcmk_child_t * child, int signal) { if (signal == 0) { signal = SIGTERM; } /* why to skip PID of 1? 
- FreeBSD ~ how untrackable process behind IPC is masqueraded as - elsewhere: how "init" task is designated; in particular, in systemd arrangement of socket-based activation, this is pretty real */ if (child->command == NULL || child->pid == PCMK__SPECIAL_PID) { crm_debug("Nothing to do for child \"%s\" (process %lld)", child->name, (long long) PCMK__SPECIAL_PID_AS_0(child->pid)); return TRUE; } if (child->pid <= 0) { crm_trace("Client %s not running", child->name); return TRUE; } errno = 0; if (kill(child->pid, signal) == 0) { crm_notice("Stopping %s "CRM_XS" sent signal %d to process %d", child->name, signal, child->pid); } else { crm_perror(LOG_ERR, "Could not stop %s (process %d) with signal %d", child->name, child->pid, signal); } return TRUE; } static char *opts_default[] = { NULL, NULL }; static char *opts_vgrind[] = { NULL, NULL, NULL, NULL, NULL }; /* TODO once libqb is taught to juggle with IPC end-points carried over as bare file descriptor (https://github.com/ClusterLabs/libqb/issues/325) it shall hand over these descriptors here if/once they are successfully pre-opened in (presumably) pcmk_child_active, to avoid any remaining room for races */ static gboolean start_child(pcmk_child_t * child) { int lpc = 0; uid_t uid = 0; gid_t gid = 0; struct rlimit oflimits; gboolean use_valgrind = FALSE; gboolean use_callgrind = FALSE; const char *devnull = "/dev/null"; const char *env_valgrind = getenv("PCMK_valgrind_enabled"); const char *env_callgrind = getenv("PCMK_callgrind_enabled"); child->active_before_startup = FALSE; if (child->command == NULL) { crm_info("Nothing to do for child \"%s\"", child->name); return TRUE; } if (env_callgrind != NULL && crm_is_true(env_callgrind)) { use_callgrind = TRUE; use_valgrind = TRUE; } else if (env_callgrind != NULL && strstr(env_callgrind, child->name)) { use_callgrind = TRUE; use_valgrind = TRUE; } else if (env_valgrind != NULL && crm_is_true(env_valgrind)) { use_valgrind = TRUE; } else if (env_valgrind != NULL && strstr(env_valgrind, child->name)) { use_valgrind = TRUE; } if (use_valgrind && strlen(VALGRIND_BIN) == 0) { crm_warn("Cannot enable valgrind for %s:" " The location of the valgrind binary is unknown", child->name); use_valgrind = FALSE; } if (child->uid) { if (crm_user_lookup(child->uid, &uid, &gid) < 0) { crm_err("Invalid user (%s) for %s: not found", child->uid, child->name); return FALSE; } crm_info("Using uid=%u and group=%u for process %s", uid, gid, child->name); } child->pid = fork(); CRM_ASSERT(child->pid != -1); if (child->pid > 0) { /* parent */ mainloop_child_add(child->pid, 0, child->name, child, pcmk_child_exit); crm_info("Forked child %d for process %s%s", child->pid, child->name, use_valgrind ? 
" (valgrind enabled: " VALGRIND_BIN ")" : ""); update_node_processes(local_nodeid, NULL, get_process_list()); return TRUE; } else { /* Start a new session */ (void)setsid(); /* Setup the two alternate arg arrays */ opts_vgrind[0] = strdup(VALGRIND_BIN); if (use_callgrind) { opts_vgrind[1] = strdup("--tool=callgrind"); opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p"); opts_vgrind[3] = strdup(child->command); opts_vgrind[4] = NULL; } else { opts_vgrind[1] = strdup(child->command); opts_vgrind[2] = NULL; opts_vgrind[3] = NULL; opts_vgrind[4] = NULL; } opts_default[0] = strdup(child->command); if(gid) { // Whether we need root group access to talk to cluster layer bool need_root_group = TRUE; if (is_corosync_cluster()) { /* Corosync clusters can drop root group access, because we set * uidgid.gid.${gid}=1 via CMAP, which allows these processes to * connect to corosync. */ need_root_group = FALSE; } // Drop root group access if not needed if (!need_root_group && (setgid(gid) < 0)) { crm_perror(LOG_ERR, "Could not set group to %d", gid); } /* Initialize supplementary groups to only those always granted to * the user, plus haclient (so we can access IPC). */ if (initgroups(child->uid, gid) < 0) { crm_err("Cannot initialize groups for %s: %s (%d)", child->uid, pcmk_strerror(errno), errno); } } if (uid && setuid(uid) < 0) { crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid); } /* Close all open file descriptors */ getrlimit(RLIMIT_NOFILE, &oflimits); for (lpc = 0; lpc < oflimits.rlim_cur; lpc++) { close(lpc); } (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ if (use_valgrind) { (void)execvp(VALGRIND_BIN, opts_vgrind); } else { (void)execvp(child->command, opts_default); } crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command); crm_exit(CRM_EX_FATAL); } return TRUE; /* never reached */ } static gboolean escalate_shutdown(gpointer data) { pcmk_child_t *child = data; if (child->pid == PCMK__SPECIAL_PID) { pcmk_process_exit(child); } else if (child->pid) { /* Use SIGSEGV instead of SIGKILL to create a core so we can see what it was up to */ crm_err("Child %s not terminating in a timely manner, forcing", child->name); stop_child(child, SIGSEGV); } return FALSE; } #define SHUTDOWN_ESCALATION_PERIOD 180000 /* 3m */ static gboolean pcmk_shutdown_worker(gpointer user_data) { static int phase = 0; static time_t next_log = 0; static int max = SIZEOF(pcmk_children); int lpc = 0; if (phase == 0) { crm_notice("Shutting down Pacemaker"); phase = max; } for (; phase > 0; phase--) { /* Don't stop anything with start_seq < 1 */ for (lpc = max - 1; lpc >= 0; lpc--) { pcmk_child_t *child = &(pcmk_children[lpc]); if (phase != child->start_seq) { continue; } if (child->pid) { time_t now = time(NULL); if (child->respawn) { if (child->pid == PCMK__SPECIAL_PID) { crm_warn("The process behind %s IPC cannot be" " terminated, so either wait the graceful" " period of %ld s for its native termination" " if it vitally depends on some other daemons" " going down in a controlled way already," " or locate and kill the correct %s process" " on your own; set PCMK_fail_fast=1 to avoid" " this altogether next time around", child->name, (long) SHUTDOWN_ESCALATION_PERIOD, child->command); } next_log = now + 30; child->respawn = FALSE; stop_child(child, SIGTERM); if (phase < pcmk_children[PCMK_CHILD_CONTROLD].start_seq) { g_timeout_add(SHUTDOWN_ESCALATION_PERIOD, escalate_shutdown, 
child); } } else if (now >= next_log) { next_log = now + 30; crm_notice("Still waiting for %s to terminate " CRM_XS " pid=%d seq=%d", child->name, child->pid, child->start_seq); } return TRUE; } /* cleanup */ crm_debug("%s confirmed stopped", child->name); child->pid = 0; } } /* send_cluster_id(); */ crm_notice("Shutdown complete"); { const char *delay = daemon_option("shutdown_delay"); if(delay) { sync(); sleep(crm_get_msec(delay) / 1000); } } g_main_loop_quit(mainloop); if (fatal_error) { crm_notice("Shutting down and staying down after fatal error"); pcmk_exit_with_cluster(CRM_EX_FATAL); } return TRUE; } static void pcmk_ignore(int nsig) { crm_info("Ignoring signal %s (%d)", strsignal(nsig), nsig); } static void pcmk_sigquit(int nsig) { pcmk_panic(__FUNCTION__); } void pcmk_shutdown(int nsig) { if (shutdown_trigger == NULL) { shutdown_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, pcmk_shutdown_worker, NULL); } mainloop_set_trigger(shutdown_trigger); } static int32_t pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (crm_client_new(c, uid, gid) == NULL) { return -EIO; } return 0; } static void pcmk_ipc_created(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); } /* Exit code means? */ static int32_t pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; const char *task = NULL; crm_client_t *c = crm_client_get(qbc); xmlNode *msg = crm_ipcs_recv(c, data, size, &id, &flags); crm_ipcs_send_ack(c, id, flags, "ack", __FUNCTION__, __LINE__); if (msg == NULL) { return 0; } task = crm_element_value(msg, F_CRM_TASK); if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) { /* Time to quit */ crm_notice("Shutting down in response to ticket %s (%s)", crm_element_value(msg, F_CRM_REFERENCE), crm_element_value(msg, F_CRM_ORIGIN)); pcmk_shutdown(15); } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) { /* Send to everyone */ struct iovec *iov; int id = 0; const char *name = NULL; crm_element_value_int(msg, XML_ATTR_ID, &id); name = crm_element_value(msg, XML_ATTR_UNAME); crm_notice("Instructing peers to remove references to node %s/%u", name, id); iov = calloc(1, sizeof(struct iovec)); iov->iov_base = dump_xml_unformatted(msg); iov->iov_len = 1 + strlen(iov->iov_base); send_cpg_iov(iov); } else { update_process_clients(c); } free_xml(msg); return 0; } /* Error code means? */ static int32_t pcmk_ipc_closed(qb_ipcs_connection_t * c) { crm_client_t *client = crm_client_get(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); crm_client_destroy(client); return 0; } static void pcmk_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pcmk_ipc_closed(c); } struct qb_ipcs_service_handlers mcp_ipc_callbacks = { .connection_accept = pcmk_ipc_accept, .connection_created = pcmk_ipc_created, .msg_process = pcmk_ipc_dispatch, .connection_closed = pcmk_ipc_closed, .connection_destroyed = pcmk_ipc_destroy }; /*! 
* \internal * \brief Send an XML message with process list of all known peers to client(s) * * \param[in] client Send message to this client, or all clients if NULL */ void update_process_clients(crm_client_t *client) { GHashTableIter iter; crm_node_t *node = NULL; xmlNode *update = create_xml_node(NULL, "nodes"); if (is_corosync_cluster()) { crm_xml_add_int(update, "quorate", pcmk_quorate); } g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { xmlNode *xml = create_xml_node(update, "node"); crm_xml_add_int(xml, "id", node->id); crm_xml_add(xml, "uname", node->uname); crm_xml_add(xml, "state", node->state); crm_xml_add_int(xml, "processes", node->processes); } if(client) { crm_trace("Sending process list to client %s", client->id); crm_ipcs_send(client, 0, update, crm_ipc_server_event); } else { crm_trace("Sending process list to %d clients", crm_hash_table_size(client_connections)); g_hash_table_iter_init(&iter, client_connections); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & client)) { crm_ipcs_send(client, 0, update, crm_ipc_server_event); } } free_xml(update); } /*! * \internal * \brief Send a CPG message with local node's process list to all peers */ static void update_process_peers(void) { /* Do nothing for corosync-2 based clusters */ struct iovec *iov = calloc(1, sizeof(struct iovec)); CRM_ASSERT(iov); if (local_name) { iov->iov_base = crm_strdup_printf("", local_name, get_process_list()); } else { iov->iov_base = crm_strdup_printf("", get_process_list()); } iov->iov_len = strlen(iov->iov_base) + 1; crm_trace("Sending %s", (char*) iov->iov_base); send_cpg_iov(iov); } /*! * \internal * \brief Update a node's process list, notifying clients and peers if needed * * \param[in] id Node ID of affected node * \param[in] uname Uname of affected node * \param[in] procs Affected node's process list mask * * \return TRUE if the process list changed, FALSE otherwise */ static gboolean update_node_processes(uint32_t id, const char *uname, uint32_t procs) { gboolean changed = FALSE; crm_node_t *node = crm_get_peer(id, uname); if (procs != 0) { if (procs != node->processes) { crm_debug("Node %s now has process list: %.32x (was %.32x)", node->uname, procs, node->processes); node->processes = procs; changed = TRUE; /* If local node's processes have changed, notify clients/peers */ if (id == local_nodeid) { update_process_clients(NULL); update_process_peers(); } } else { crm_trace("Node %s still has process list: %.32x", node->uname, procs); } } return changed; } /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"shutdown", 0, 0, 'S', "\tInstruct Pacemaker to shutdown on this machine"}, {"features", 0, 0, 'F', "\tDisplay the full version and list of features Pacemaker was built with"}, {"-spacer-", 1, 0, '-', "\nAdditional Options:"}, {"foreground", 0, 0, 'f', "\t(Ignored) Pacemaker always runs in the foreground"}, {"pid-file", 1, 0, 'p', "\t(Ignored) Daemon pid file location"}, {"standby", 0, 0, 's', "\tStart node in standby state"}, {NULL, 0, 0, 0} }; /* *INDENT-ON* */ static void mcp_chown(const char *path, uid_t uid, gid_t gid) { int rc = chown(path, uid, gid); if (rc < 0) { crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s", path, CRM_DAEMON_USER, gid, pcmk_strerror(errno)); } } /*! 
 * \internal
 * \brief Check a child's liveness via its IPC end-point and/or PID
 *
 * \param[inout] child  Tracked data for the child
 *
 * \return 1 if the child is alive and well, 2 if it is up per its PID but not
 *         per its IPC end-point (still starting?), 0 if no trace of liveness
 *         was detected by either property (provided at least one of them is
 *         trackable to begin with), -1 on error, and -2 when the child's IPC
 *         end-point is blocked by an unauthorized process (a log message is
 *         emitted in both of the latter cases)
 *
 * \note Apart from \c pid, this function does not modify any \p child member,
 *       and it does not act on processes directly except in one case: it
 *       invokes \c stop_child when a different, authentic process turns out
 *       to hold the child's IPC end-point.
 */
static int
pcmk_child_active(pcmk_child_t *child)
{
    static uid_t cl_uid = 0;
    static gid_t cl_gid = 0;
    const uid_t root_uid = 0;
    const gid_t root_gid = 0;
    const uid_t *ref_uid;
    const gid_t *ref_gid;
    int ret = 0;
    pid_t ipc_pid = 0;

    if (child->endpoint == NULL
            && (child->pid <= 0 || child->pid == PCMK__SPECIAL_PID)) {
        crm_err("Cannot track child %s for missing both API end-point and PID",
                child->name);
        ret = -1;  /* misuse of the function when child is not trackable */

    } else if (child->endpoint != NULL) {

        ref_uid = (child->uid != NULL) ? &cl_uid : &root_uid;
        ref_gid = (child->uid != NULL) ? &cl_gid : &root_gid;

        if (child->uid != NULL && !cl_uid && !cl_gid
                && crm_user_lookup(CRM_DAEMON_USER, &cl_uid, &cl_gid) < 0) {
            crm_err("Could not find user and group IDs for user %s",
                    CRM_DAEMON_USER);
            ret = -1;
        } else if ((ret = pcmk__ipc_is_authentic_process_active(child->endpoint,
                                                                *ref_uid,
                                                                *ref_gid,
                                                                &ipc_pid)) < 0) {
            /* game over */
        } else if (child->pid <= 0) {
            /* hit new child to be initialized, or reset to zero
               and investigate further for ret == 0 */
            child->pid = ipc_pid;
        } else if (ipc_pid && child->pid != ipc_pid) {
            /* ultimately strange for ret == 1; either way, investigate */
            ret = 0;
        }
    }

    if (!ret) {
        /* No IPC-based liveness was detected (including the case of a child
           tracked without IPC at all), or it was detected for a different
           authentic process; safe on FreeBSD since the only possible change
           is from a proper child's PID to the "special" PID of 1 behind a
           more loosely related process. */
        ret = crm_pid_active(child->pid, child->name);
        if (ipc_pid && (ret != 1
                        || ipc_pid == PCMK__SPECIAL_PID
                        || crm_pid_active(ipc_pid, child->name) == 1)) {
            if (ret == 1) {
                /* Assume no child forks while keeping its IPC socket, so the
                   tracking can only have gotten out of sync through external
                   (esoteric?) interference (a user-forced "refresh" of the
                   process, or perhaps a deliberate race at start-up).
                   Switching over to the other detected, authentic instance
                   that already owns the IPC end-point is a better trade-off
                   than "neutralizing" it first just to give the original (or
                   a newly spawned) daemon process room to operate. */
                /* the tracked PID does not own the IPC after all
                   (what about corosync CPG?) */
                stop_child(child, SIGKILL);
            } else {
                ret = 1;
            }
            child->pid = ipc_pid;
        } else if (ret == 1) {
            ret = 2;  /* up per PID, but not per IPC (still starting?) */
        } else if (!child->pid && ret == -1) {
            ret = 0;  /* correct -1 on FreeBSD from above back to 0 */
        }
    }
    return ret;
}
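/* A minimal, self-contained sketch (illustration only, not part of this
 * changeset) of the kind of PID-based liveness probe that crm_pid_active()
 * performs above, relying purely on POSIX kill(pid, 0) semantics: no signal
 * is delivered, but the call reports whether the process exists and whether
 * we may signal it. The function name below is hypothetical; the real
 * crm_pid_active() additionally cross-checks the process name where /proc
 * is available.
 */
#include <sys/types.h>
#include <signal.h>
#include <errno.h>

static int
pid_liveness_sketch(pid_t pid)
{
    if (pid <= 1) {
        return 0;                    /* never probe init or an invalid PID */
    }
    if (kill(pid, 0) == 0) {
        return 1;                    /* process exists and is signalable */
    }
    return (errno == EPERM)? 1 : 0;  /* EPERM: exists, but owned by someone else */
}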
static gboolean
check_active_before_startup_processes(gpointer user_data)
{
    int start_seq = 1, lpc = 0;
    static int max = SIZEOF(pcmk_children);
    gboolean keep_tracking = FALSE;

    for (start_seq = 1; start_seq < max; start_seq++) {
        for (lpc = 0; lpc < max; lpc++) {
            if (pcmk_children[lpc].active_before_startup == FALSE) {
                /* we are already tracking it as a child process. */
                continue;
            } else if (start_seq != pcmk_children[lpc].start_seq) {
                continue;
            } else {
                const char *name = pcmk_children[lpc].name;
                int ret;

                switch ((ret = pcmk_child_active(&pcmk_children[lpc]))) {
                    case 1:
                        break;
                    case 0:
                    case 2:  /* this very case: it was OK once already */
                        if (pcmk_children[lpc].respawn == TRUE) {
                            crm_err("%s[%d] terminated%s", name,
                                    PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
                                    ret ? " as IPC server" : "");
                        } else {
                            /* orderly shutdown */
                            crm_notice("%s[%d] terminated%s", name,
                                       PCMK__SPECIAL_PID_AS_0(pcmk_children[lpc].pid),
                                       ret ? " as IPC server" : "");
                        }
                        pcmk_process_exit(&(pcmk_children[lpc]));
                        continue;
                    default:
                        crm_crit("Unexpected value from pcmk_child_active:"
                                 " %d (pid=%lld)", ret,
                                 (long long) PCMK__SPECIAL_PID_AS_0(
                                                 pcmk_children[lpc].pid));
                        /* fall through */
                    case -1:
                    case -2:
                        /* message(s) already emitted */
                        crm_exit(CRM_EX_FATAL);
                        break;  /* static analysis/noreturn */
                }
            }
            /* at least one of the processes found at startup
             * is still going, so keep this recurring timer around */
            keep_tracking = TRUE;
        }
    }

    global_keep_tracking = keep_tracking;
    return keep_tracking;
}

/*!
 * \internal
 * \brief One-off initial check for pre-existing "child" processes
 *
 * Here, a "child" process means a subdaemon that defines an IPC API
 * end-point (currently all of them do). A subdaemon without such an
 * end-point would have no shared resource to conflict over, so it could
 * presumably be started anew without hesitation and is skipped here. If
 * that assumption ever stops holding, the notion of a shared resource will
 * have to be generalized beyond the API end-point.
 *
 * For the boundary cases where a "child" is still starting (its IPC
 * end-point has not been observed yet), or, more rarely (practically
 * FreeBSD only), where a pre-existing authentic process cannot be tracked,
 * we give the situation some time to resolve itself: either the socket
 * appears, or the untrackable process disappears as far as the observable
 * IPC is concerned.
 *
 * \return 0 if no such "child" process was found, a positive number X when
 *         X "children" were detected, -1 on an internal error, and -2 when
 *         any would-be-used IPC end-point is blocked by an unauthorized
 *         process
 *
 * \note Since this runs at the very start, the \c respawn_count field of
 *       each child is temporarily overloaded to track "rounds of waiting";
 *       it is restored just before finishing successfully (i.e. returning
 *       a value >= 0) and remains unrestored otherwise. Setting the field
 *       to a negative number is one way to suppress the liveness detection
 *       for a particular child.
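 *
 * \note For example, with \c WAIT_TRIES 4 and the 250ms polls below, a child
 *       that appears alive per its PID but whose IPC end-point never shows
 *       up is given roughly one second before start-up is aborted.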
*/ #define WAIT_TRIES 4 /* together with interleaved sleeps, worst case ~ 1s */ static int find_and_track_existing_processes(void) { unsigned tracking = 0U; bool wait_in_progress; int cur; size_t i, rounds; for (rounds = 1; rounds <= WAIT_TRIES; rounds++) { wait_in_progress = false; for (i = 0; i < SIZEOF(pcmk_children); i++) { if (!pcmk_children[i].endpoint || pcmk_children[i].respawn_count < 0 || !(cur = pcmk_child_active(&pcmk_children[i]))) { /* as a speculation, don't give up in the context of pcmk_child_active check if there are more rounds to come for other reasons, but don't artificially wait just because of this, since we would preferably start ASAP */ continue; } pcmk_children[i].respawn_count = rounds; switch (cur) { case 1: if (pcmk_children[i].pid == PCMK__SPECIAL_PID) { if (crm_is_true(getenv("PCMK_fail_fast"))) { crm_crit("Cannot reliably track pre-existing" " authentic process behind %s IPC on this" " platform and PCMK_fail_fast requested", pcmk_children[i].endpoint); return -1; } else if (pcmk_children[i].respawn_count == WAIT_TRIES) { crm_notice("Assuming pre-existing authentic, though" " on this platform untrackable, process" " behind %s IPC is stable (was in %d" " previous samples) so rather than" " bailing out (PCMK_fail_fast not" " requested), we just switch to a less" " optimal IPC liveness monitoring" " (not very suitable for heavy load)", pcmk_children[i].name, WAIT_TRIES - 1); crm_warn("The process behind %s IPC cannot be" " terminated, so the overall shutdown" " will get delayed implicitly (%ld s)," " which serves as a graceful period for" " its native termination if it vitally" " depends on some other daemons going" " down in a controlled way already", pcmk_children[i].name, (long) SHUTDOWN_ESCALATION_PERIOD); } else { wait_in_progress = true; crm_warn("Cannot reliably track pre-existing" " authentic process behind %s IPC on this" " platform, can still disappear in %d" " attempt(s)", pcmk_children[i].endpoint, WAIT_TRIES - pcmk_children[i].respawn_count); continue; } } crm_notice("Tracking existing %s process (pid=%lld)", pcmk_children[i].name, (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid)); pcmk_children[i].respawn_count = -1; /* 0~keep watching */ pcmk_children[i].active_before_startup = TRUE; tracking++; break; case 2: if (pcmk_children[i].respawn_count == WAIT_TRIES) { crm_crit("%s IPC end-point for existing authentic" " process %lld did not (re)appear", pcmk_children[i].endpoint, (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid)); return -1; } wait_in_progress = true; crm_warn("Cannot find %s IPC end-point for existing" " authentic process %lld, can still (re)appear" " in %d attempts (?)", pcmk_children[i].endpoint, (long long) PCMK__SPECIAL_PID_AS_0( pcmk_children[i].pid), WAIT_TRIES - pcmk_children[i].respawn_count); continue; case -1: case -2: return cur; /* messages already emitted */ default: crm_crit("Unexpected condition"CRM_XS"cur=%d", cur); return -1; /* unexpected condition */ } } if (!wait_in_progress) { break; } (void) poll(NULL, 0, 250); /* a bit for changes to possibly happen */ } for (i = 0; i < SIZEOF(pcmk_children); i++) { pcmk_children[i].respawn_count = 0; /* restore pristine state */ } if (tracking) { g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL, check_active_before_startup_processes, NULL); } return (tracking > INT_MAX) ? 
INT_MAX : tracking; } static void init_children_processes(void) { int start_seq = 1, lpc = 0; static int max = SIZEOF(pcmk_children); /* start any children that have not been detected */ for (start_seq = 1; start_seq < max; start_seq++) { /* don't start anything with start_seq < 1 */ for (lpc = 0; lpc < max; lpc++) { if (pcmk_children[lpc].pid) { /* we are already tracking it */ continue; } if (start_seq == pcmk_children[lpc].start_seq) { start_child(&(pcmk_children[lpc])); } } } /* From this point on, any daemons being started will be due to * respawning rather than node start. * * This may be useful for the daemons to know */ setenv("PCMK_respawned", "true", 1); } static void mcp_cpg_destroy(gpointer user_data) { crm_crit("Lost connection to cluster layer, shutting down"); crm_exit(CRM_EX_DISCONNECT); } /*! * \internal * \brief Process a CPG message (process list or manual peer cache removal) * * \param[in] handle CPG connection (ignored) * \param[in] groupName CPG group name (ignored) * \param[in] nodeid ID of affected node * \param[in] pid Process ID (ignored) * \param[in] msg CPG XML message * \param[in] msg_len Length of msg in bytes (ignored) */ static void mcp_cpg_deliver(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { xmlNode *xml = string2xml(msg); const char *task = crm_element_value(xml, F_CRM_TASK); crm_trace("Received CPG message (%s): %.200s", (task? task : "process list"), (char*)msg); if (task == NULL) { if (nodeid == local_nodeid) { crm_debug("Ignoring message with local node's process list"); } else { uint32_t procs = 0; const char *uname = crm_element_value(xml, "uname"); crm_element_value_int(xml, "proclist", (int *)&procs); if (update_node_processes(nodeid, uname, procs)) { update_process_clients(NULL); } } } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) { int id = 0; const char *name = NULL; crm_element_value_int(xml, XML_ATTR_ID, &id); name = crm_element_value(xml, XML_ATTR_UNAME); reap_crm_member(id, name); } if (xml != NULL) { free_xml(xml); } } static void mcp_cpg_membership(cpg_handle_t handle, const struct cpg_name *groupName, const struct cpg_address *member_list, size_t member_list_entries, const struct cpg_address *left_list, size_t left_list_entries, const struct cpg_address *joined_list, size_t joined_list_entries) { /* Update peer cache if needed */ pcmk_cpg_membership(handle, groupName, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries); /* Always broadcast our own presence after any membership change */ update_process_peers(); } static gboolean mcp_quorum_callback(unsigned long long seq, gboolean quorate) { pcmk_quorate = quorate; return TRUE; } static void mcp_quorum_destroy(gpointer user_data) { crm_info("connection lost"); } int main(int argc, char **argv) { int rc; int flag; int argerr = 0; int option_index = 0; gboolean shutdown = FALSE; uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; struct rlimit cores; crm_ipc_t *old_instance = NULL; qb_ipcs_service_t *ipcs = NULL; static crm_cluster_t cluster; crm_log_preinit(NULL, argc, argv); crm_set_options(NULL, "mode [options]", long_options, "Start/Stop Pacemaker\n"); mainloop_add_signal(SIGHUP, pcmk_ignore); mainloop_add_signal(SIGQUIT, pcmk_sigquit); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 'f': /* Legacy */ break; case 'p': pid_file = optarg; break; case 's': 
set_daemon_option("node_start_state", "standby"); break; case '$': case '?': crm_help(flag, CRM_EX_OK); break; case 'S': shutdown = TRUE; break; case 'F': printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURE_SET, CRM_FEATURES); crm_exit(CRM_EX_OK); default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (argerr) { crm_help('?', CRM_EX_USAGE); } setenv("LC_ALL", "C", 1); set_daemon_option("mcp", "true"); crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_debug("Checking for existing Pacemaker instance"); old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0); (void) crm_ipc_connect(old_instance); if (shutdown) { crm_debug("Shutting down existing Pacemaker instance by request"); while (crm_ipc_connected(old_instance)) { xmlNode *cmd = create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL); crm_debug("."); crm_ipc_send(old_instance, cmd, 0, 0, NULL); free_xml(cmd); sleep(2); } crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_exit(CRM_EX_OK); } else if (crm_ipc_connected(old_instance)) { crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_err("Aborting start-up because active Pacemaker instance found"); crm_exit(CRM_EX_FATAL); } crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); if (mcp_read_config() == FALSE) { crm_notice("Could not obtain corosync config data, exiting"); crm_exit(CRM_EX_UNAVAILABLE); } // OCF shell functions and cluster-glue need facility under different name { const char *facility = daemon_option("logfacility"); if (facility && safe_str_neq(facility, "none")) { setenv("HA_LOGFACILITY", facility, 1); } } crm_notice("Starting Pacemaker %s "CRM_XS" build=%s features:%s", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); mainloop = g_main_loop_new(NULL, FALSE); rc = getrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_perror(LOG_ERR, "Cannot determine current maximum core size."); } else { if (cores.rlim_max == 0 && geteuid() == 0) { cores.rlim_max = RLIM_INFINITY; } else { crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max); } cores.rlim_cur = cores.rlim_max; rc = setrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_perror(LOG_ERR, "Core file generation will remain disabled." 
" Core files are an important diagnostic tool, so" " please consider enabling them by default."); } } if (crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) < 0) { crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER); crm_exit(CRM_EX_NOUSER); } // Used by some resource agents if ((mkdir(CRM_STATE_DIR, 0750) < 0) && (errno != EEXIST)) { crm_warn("Could not create " CRM_STATE_DIR ": %s", pcmk_strerror(errno)); } else { mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); } /* Used to store core/blackbox/scheduler/cib files in */ crm_build_path(CRM_PACEMAKER_DIR, 0750); mcp_chown(CRM_PACEMAKER_DIR, pcmk_uid, pcmk_gid); /* Used to store core files in */ crm_build_path(CRM_CORE_DIR, 0750); mcp_chown(CRM_CORE_DIR, pcmk_uid, pcmk_gid); /* Used to store blackbox dumps in */ crm_build_path(CRM_BLACKBOX_DIR, 0750); mcp_chown(CRM_BLACKBOX_DIR, pcmk_uid, pcmk_gid); // Used to store scheduler inputs in crm_build_path(PE_STATE_DIR, 0750); mcp_chown(PE_STATE_DIR, pcmk_uid, pcmk_gid); /* Used to store the cluster configuration */ crm_build_path(CRM_CONFIG_DIR, 0750); mcp_chown(CRM_CONFIG_DIR, pcmk_uid, pcmk_gid); // Don't build CRM_RSCTMP_DIR, pacemaker-execd will do it ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks); if (ipcs == NULL) { crm_err("Couldn't start IPC server"); crm_exit(CRM_EX_OSERR); } /* Allows us to block shutdown */ if (cluster_connect_cfg(&local_nodeid) == FALSE) { crm_err("Couldn't connect to Corosync's CFG service"); crm_exit(CRM_EX_PROTOCOL); } if(pcmk_locate_sbd() > 0) { setenv("PCMK_watchdog", "true", 1); } else { setenv("PCMK_watchdog", "false", 1); } switch (find_and_track_existing_processes()) { case -1: crm_crit("Internal fatality, see the log"); crm_exit(CRM_EX_FATAL); case -2: crm_crit("Blocked by foreign process, kill the offender"); crm_exit(CRM_EX_CANTCREAT); default: break; }; cluster.destroy = mcp_cpg_destroy; cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver; cluster.cpg.cpg_confchg_fn = mcp_cpg_membership; crm_set_autoreap(FALSE); rc = pcmk_ok; if (cluster_connect_cpg(&cluster) == FALSE) { crm_err("Couldn't connect to Corosync's CPG service"); rc = -ENOPROTOOPT; } else if (cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy) == FALSE) { rc = -ENOTCONN; } else { local_name = get_local_node_name(); update_node_processes(local_nodeid, local_name, get_process_list()); mainloop_add_signal(SIGTERM, pcmk_shutdown); mainloop_add_signal(SIGINT, pcmk_shutdown); init_children_processes(); crm_notice("Pacemaker daemon successfully started and accepting connections"); g_main_loop_run(mainloop); } if (ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } g_main_loop_unref(mainloop); cluster_disconnect_cpg(&cluster); cluster_disconnect_cfg(); crm_exit(crm_errno2exit(rc)); } diff --git a/lib/common/watchdog.c b/lib/common/watchdog.c index 54c25112f0..c73c8ab58f 100644 --- a/lib/common/watchdog.c +++ b/lib/common/watchdog.c @@ -1,268 +1,269 @@ /* - * Copyright 2013 Lars Marowsky-Bree - * 2014-2018 Andrew Beekhof + * Copyright 2013-2019 the Pacemaker project contributors + * + * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #ifdef _POSIX_MEMLOCK # include #endif static int sbd_pid = 0; enum pcmk_panic_flags { pcmk_panic_none = 0x00, pcmk_panic_delay = 0x01, pcmk_panic_kdump = 0x02, pcmk_panic_shutdown = 0x04, }; static void sysrq_trigger(char t) { #if SUPPORT_PROCFS FILE *procf; // Root can always write here, regardless of kernel.sysrq value procf = fopen("/proc/sysrq-trigger", "a"); if (!procf) { crm_perror(LOG_WARNING, "Opening sysrq-trigger failed"); return; } crm_info("sysrq-trigger: %c", t); fprintf(procf, "%c\n", t); fclose(procf); #endif // SUPPORT_PROCFS return; } static void pcmk_panic_local(void) { int rc = pcmk_ok; uid_t uid = geteuid(); pid_t ppid = getppid(); if(uid != 0 && ppid > 1) { /* We're a non-root pacemaker daemon (pacemaker-based, * pacemaker-controld, pacemaker-schedulerd, pacemaker-attrd, etc.) with * the original pacemakerd parent. * * Of these, only the controller is likely to be initiating resets. */ do_crm_log_always(LOG_EMERG, "Signaling parent %d to panic", ppid); crm_exit(CRM_EX_PANIC); return; } else if (uid != 0) { #if SUPPORT_PROCFS /* * No permissions, and no pacemakerd parent to escalate to. * Track down the new pacemakerd process and send a signal instead. */ union sigval signal_value; memset(&signal_value, 0, sizeof(signal_value)); ppid = crm_procfs_pid_of("pacemakerd"); do_crm_log_always(LOG_EMERG, "Signaling pacemakerd(%d) to panic", ppid); if(ppid > 1 && sigqueue(ppid, SIGQUIT, signal_value) < 0) { crm_perror(LOG_EMERG, "Cannot signal pacemakerd(%d) to panic", ppid); } #endif // SUPPORT_PROCFS /* The best we can do now is die */ crm_exit(CRM_EX_PANIC); return; } /* We're either pacemakerd, or a pacemaker daemon running as root */ if (safe_str_eq("crash", getenv("PCMK_panic_action"))) { sysrq_trigger('c'); } else { sysrq_trigger('b'); } /* reboot(RB_HALT_SYSTEM); rc = errno; */ reboot(RB_AUTOBOOT); rc = errno; do_crm_log_always(LOG_EMERG, "Reboot failed, escalating to %d: %s (%d)", ppid, pcmk_strerror(rc), rc); if(ppid > 1) { /* child daemon */ exit(CRM_EX_PANIC); } else { /* pacemakerd or orphan child */ exit(CRM_EX_FATAL); } } static void pcmk_panic_sbd(void) { union sigval signal_value; pid_t ppid = getppid(); do_crm_log_always(LOG_EMERG, "Signaling sbd(%d) to panic", sbd_pid); memset(&signal_value, 0, sizeof(signal_value)); /* TODO: Arrange for a slightly less brutal option? 
*/ if(sigqueue(sbd_pid, SIGKILL, signal_value) < 0) { crm_perror(LOG_EMERG, "Cannot signal SBD(%d) to terminate", sbd_pid); pcmk_panic_local(); } if(ppid > 1) { /* child daemon */ exit(CRM_EX_PANIC); } else { /* pacemakerd or orphan child */ exit(CRM_EX_FATAL); } } void pcmk_panic(const char *origin) { static struct qb_log_callsite *panic_cs = NULL; if (panic_cs == NULL) { panic_cs = qb_log_callsite_get(__func__, __FILE__, "panic-delay", LOG_TRACE, __LINE__, crm_trace_nonlog); } /* Ensure sbd_pid is set */ (void)pcmk_locate_sbd(); if (panic_cs && panic_cs->targets) { /* getppid() == 1 means our original parent no longer exists */ do_crm_log_always(LOG_EMERG, "Shutting down instead of panicking the node: origin=%s, sbd=%d, parent=%d", origin, sbd_pid, getppid()); crm_exit(CRM_EX_FATAL); return; } if(sbd_pid > 1) { do_crm_log_always(LOG_EMERG, "Signaling sbd(%d) to panic the system: %s", sbd_pid, origin); pcmk_panic_sbd(); } else { do_crm_log_always(LOG_EMERG, "Panicking the system directly: %s", origin); pcmk_panic_local(); } } pid_t pcmk_locate_sbd(void) { char *pidfile = NULL; char *sbd_path = NULL; if(sbd_pid > 1) { return sbd_pid; } /* Look for the pid file */ - pidfile = crm_strdup_printf("%s/sbd.pid", HA_STATE_DIR); + pidfile = crm_strdup_printf(PCMK_RUN_DIR "/sbd.pid"); sbd_path = crm_strdup_printf("%s/sbd", SBIN_DIR); /* Read the pid file */ CRM_ASSERT(pidfile); sbd_pid = crm_pidfile_inuse(pidfile, 0, sbd_path); if(sbd_pid > 0) { crm_trace("SBD detected at pid=%d (file)", sbd_pid); #if SUPPORT_PROCFS } else { /* Fall back to /proc for systems that support it */ sbd_pid = crm_procfs_pid_of("sbd"); crm_trace("SBD detected at pid=%d (proc)", sbd_pid); #endif // SUPPORT_PROCFS } if(sbd_pid < 0) { sbd_pid = 0; crm_trace("SBD not detected"); } free(pidfile); free(sbd_path); return sbd_pid; } long crm_get_sbd_timeout(void) { static long sbd_timeout = -2; if (sbd_timeout == -2) { sbd_timeout = crm_get_msec(getenv("SBD_WATCHDOG_TIMEOUT")); } return sbd_timeout; } long crm_auto_watchdog_timeout() { long sbd_timeout = crm_get_sbd_timeout(); return (sbd_timeout <= 0)? 0 : (2 * sbd_timeout); } gboolean check_sbd_timeout(const char *value) { long st_timeout = value? crm_get_msec(value) : 0; if (st_timeout < 0) { st_timeout = crm_auto_watchdog_timeout(); crm_debug("Using calculated value %ld for stonith-watchdog-timeout (%s)", st_timeout, value); } if (st_timeout == 0) { crm_debug("Watchdog may be enabled but stonith-watchdog-timeout is disabled (%s)", value? value : "default"); } else if (pcmk_locate_sbd() == 0) { do_crm_log_always(LOG_EMERG, "Shutting down: stonith-watchdog-timeout configured (%s) but SBD not active", (value? 
value : "auto")); crm_exit(CRM_EX_FATAL); return FALSE; } else { long sbd_timeout = crm_get_sbd_timeout(); if (st_timeout < sbd_timeout) { do_crm_log_always(LOG_EMERG, "Shutting down: stonith-watchdog-timeout (%s) too short (must be >%ldms)", value, sbd_timeout); crm_exit(CRM_EX_FATAL); return FALSE; } crm_info("Watchdog configured with stonith-watchdog-timeout %s and SBD timeout %ldms", value, sbd_timeout); } return TRUE; } diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in index 2d6d8ae7f2..439e81a200 100644 --- a/rpm/pacemaker.spec.in +++ b/rpm/pacemaker.spec.in @@ -1,875 +1,884 @@ # Globals and defines to control package behavior (configure these as desired) ## User and group to use for nonprivileged services %global uname hacluster %global gname haclient ## Where to install Pacemaker documentation %if 0%{?suse_version} > 0 %global pcmk_docdir %{_docdir}/%{name}-%{version} %else %global pcmk_docdir %{_docdir}/%{name} %endif ## GitHub entity that distributes source (for ease of using a fork) %global github_owner ClusterLabs ## Upstream pacemaker version, and its package version (specversion ## can be incremented to build packages reliably considered "newer" ## than previously built packages with the same pcmkversion) %global pcmkversion 2.0.2 %global specversion 1 ## Upstream commit (or git tag, such as "Pacemaker-" plus the ## {pcmkversion} macro for an official release) to use for this package %global commit HEAD ## Since git v2.11, the extent of abbreviation is autoscaled by default ## (used to be constant of 7), so we need to convey it for non-tags, too. %global commit_abbrev 7 ## Python major version to use (2, 3, or 0 for auto-detect) %global python_major 0 # Define globals for convenient use later ## Workaround to use parentheses in other globals %global lparen ( %global rparen ) ## Short version of git commit %define shortcommit %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo ${c:10};; *%{rparen} echo ${c:0:%{commit_abbrev}};; esac) ## Whether this is a tagged release %define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?) ## Whether this is a release candidate (in case of a tagged release) %define pre_release %([ "%{tag_release}" -eq 0 ] || { case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;; esac; }; echo $?) ## Heuristic used to infer bleeding-edge deployments that are ## less likely to have working versions of the documentation tools %define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) ## Whether this platform defaults to using systemd as an init system ## (needs to be evaluated prior to BuildRequires being enumerated and ## installed as it's intended to conditionally select some of these, and ## for that there are only few indicators with varying reliability: ## - presence of systemd-defined macros (when building in a full-fledged ## environment, which is not the case with ordinary mock-based builds) ## - systemd-aware rpm as manifested with the presence of particular ## macro (rpm itself will trivially always be present when building) ## - existence of /usr/lib/os-release file, which is something heavily ## propagated by systemd project ## - when not good enough, there's always a possibility to check ## particular distro-specific macros (incl. version comparison) %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \ } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \ } || %(test -f /usr/lib/os-release; test $? 
-ne 0; echo $?)) %if 0%{?fedora} > 20 || 0%{?rhel} > 7 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword) ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'" %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM} %endif +%if !%{defined _rundir} +%if 0%{?fedora} >= 15 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1200 +%define _rundir /run +%else +%define _rundir /var/run +%endif +%endif + ## Different distros name certain packages differently %if 0%{?suse_version} > 0 %global pkgname_bzip2_devel libbz2-devel %global pkgname_docbook_xsl docbook-xsl-stylesheets %global pkgname_gnutls_devel libgnutls-devel %global pkgname_shadow_utils shadow %global pkgname_procps procps %global pkgname_corosync_lib libcorosync %global pkgname_glue_libs libglue %global pkgname_pcmk_libs lib%{name}3 %global hacluster_id 90 %else %global pkgname_libtool_devel libtool-ltdl-devel %global pkgname_libtool_devel_arch libtool-ltdl-devel%{?_isa} %global pkgname_bzip2_devel bzip2-devel %global pkgname_docbook_xsl docbook-style-xsl %global pkgname_gnutls_devel gnutls-devel %global pkgname_shadow_utils shadow-utils %global pkgname_procps procps-ng %global pkgname_publican publican %global pkgname_corosync_lib corosynclib %global pkgname_glue_libs cluster-glue-libs %global pkgname_pcmk_libs %{name}-libs %global hacluster_id 189 %endif # Python-related definitions ## Use Python 3 on certain platforms if major version not specified %if %{?python_major} == 0 %if 0%{?fedora} > 26 || 0%{?rhel} > 7 %global python_major 3 %endif %endif ## Turn off auto-compilation of Python files outside Python specific paths, ## so there's no risk that unexpected "__python" macro gets picked to do the ## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected) ## -- distro-dependent tricks or automake's fallback to be applied there %if %{defined _python_bytecompile_extra} %global _python_bytecompile_extra 0 %else ### the statement effectively means no RPM-native byte-compiling will occur at ### all, so distro-dependent tricks for Python-specific packages to be applied %global __os_install_post %(echo '%{__os_install_post}' | { sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; }) %endif ## Values that differ by Python major version %if 0%{?python_major} > 2 %global python_name python3 %global python_path %{?__python3}%{!?__python3:/usr/bin/python%{?python3_pkgversion}%{!?python3_pkgversion:3}} %define python_site %{?python3_sitelib}%{!?python3_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %if 0%{?python_major} > 1 %global python_name python2 %global python_path %{?__python2}%{!?__python2:/usr/bin/python%{?python2_pkgversion}%{!?python2_pkgversion:2}} %define python_site %{?python2_sitelib}%{!?python2_sitelib:%( %{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %else %global python_name python %global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}} %define python_site %{?python_sitelib}%{!?python_sitelib:%( python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)} %endif %endif # Definitions for backward compatibility with older RPM versions ## Ensure the license macro behaves consistently (older RPM will otherwise ## overwrite it once it encounters "License:"). 
Courtesy Jason Tibbitts: ## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77 %if !%{defined _licensedir} %define description %{lua: rpm.define("license %doc") print("%description") } %endif # Define conditionals so that "rpmbuild --with " and # "rpmbuild --without " can enable and disable specific features ## Add option to enable support for stonith/external fencing agents %bcond_with stonithd ## Add option to create binaries suitable for use with profiling tools %bcond_with profiling ## Add option to create binaries with coverage analysis %bcond_with coverage ## Add option to skip generating documentation ## (the build tools aren't available everywhere) %bcond_without doc ## Add option to prefix package version with "0." ## (so later "official" packages will be considered updates) %bcond_with pre_release ## Add option to ship Upstart job files %bcond_with upstart_job ## Add option to turn off hardening of libraries and daemon executables %bcond_without hardening ## Add option to disable links for legacy daemon names %bcond_without legacy_links # Keep sane profiling data if requested %if %{with profiling} ## Disable -debuginfo package and stripping binaries/libraries %define debug_package %{nil} %endif # Define the release version # (do not look at externally enforced pre-release flag for tagged releases # as only -rc tags, captured with the second condition, implies that then) %if (!%{tag_release} && %{with pre_release}) || 0%{pre_release} %if 0%{pre_release} %define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) %else %define pcmk_release 0.%{specversion}.%{shortcommit}.git %endif %else %if 0%{tag_release} %define pcmk_release %{specversion} %else %define pcmk_release %{specversion}.%{shortcommit}.git %endif %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: %{pcmkversion} Release: %{pcmk_release}%{?dist} %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Url: http://www.clusterlabs.org Group: System Environment/Daemons # Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL: # https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz Requires: resource-agents Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} %if !%{defined _unitdir} Requires: %{pkgname_procps} Requires: psmisc %endif %{?systemd_requires} Requires: %{python_path} BuildRequires: %{python_name}-devel # Pacemaker requires a minimum libqb functionality Requires: libqb >= 0.13.0 BuildRequires: libqb-devel >= 0.13.0 # Basics required for the build (even if usually satisfied through other BRs) BuildRequires: coreutils findutils grep sed # Required for core functionality BuildRequires: automake autoconf gcc libtool pkgconfig %{?pkgname_libtool_devel} BuildRequires: pkgconfig(glib-2.0) >= 2.16 BuildRequires: libxml2-devel libxslt-devel libuuid-devel BuildRequires: %{pkgname_bzip2_devel} # Enables optional functionality BuildRequires: ncurses-devel %{pkgname_docbook_xsl} BuildRequires: help2man %{pkgname_gnutls_devel} pam-devel pkgconfig(dbus-1) %if %{systemd_native} BuildRequires: pkgconfig(systemd) %endif Requires: corosync >= 2.0.0 
BuildRequires: %{pkgname_corosync_lib}-devel >= 2.0.0 %if %{with stonithd} BuildRequires: %{pkgname_glue_libs}-devel %endif ## (note no avoiding effect when building through non-customized mock) %if !%{bleeding} %if %{with doc} BuildRequires: inkscape asciidoc %{?pkgname_publican} %endif %endif Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} # Bundled bits ## Pacemaker uses the crypto/md5-buffer module from gnulib %if 0%{?fedora} || 0%{?rhel} Provides: bundled(gnulib) %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager. It supports more than 16 node clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --with(out) : coverage doc stonithd hardening pre_release profiling upstart_job %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Recommends: pcmk-cluster-manager = %{version}-%{release} # For crm_report Recommends: tar Recommends: bzip2 %endif Requires: perl-TimeDate Requires: %{pkgname_procps} Requires: psmisc Requires(post):coreutils %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{pkgname_pcmk_libs} License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons Requires(pre): %{pkgname_shadow_utils} Requires: %{name}-schemas = %{version}-%{release} # sbd 1.4.0+ supports the libpe_status API for pe_working_set_t Conflicts: sbd < 1.4.0 %description -n %{pkgname_pcmk_libs} Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} %description cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. %package remote %if %{defined _unitdir} License: GPLv2+ and LGPLv2+ %else # initscript is Revised BSD License: GPLv2+ and LGPLv2+ and BSD %endif Summary: Pacemaker remote daemon for non-cluster nodes Group: System Environment/Daemons Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: resource-agents %if !%{defined _unitdir} Requires: %{pkgname_procps} %endif # -remote can be fully independent of systemd %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}} Provides: pcmk-cluster-manager = %{version}-%{release} Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release} %description remote Pacemaker is an advanced, scalable High-Availability cluster resource manager. 
The %{name}-remote package contains the Pacemaker Remote daemon which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. %package -n %{pkgname_pcmk_libs}-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release} Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release} Requires: libuuid-devel%{?_isa} %{?pkgname_libtool_devel_arch} Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa} Requires: %{pkgname_bzip2_devel}%{?_isa} glib2-devel%{?_isa} Requires: libqb-devel%{?_isa} Requires: %{pkgname_corosync_lib}-devel%{?_isa} >= 2.0.0 %description -n %{pkgname_pcmk_libs}-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager. The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: %{python_path} Requires: %{pkgname_pcmk_libs} = %{version}-%{release} Requires: %{pkgname_procps} Requires: psmisc BuildArch: noarch # systemd python bindings are separate package in some distros %if %{defined systemd_requires} %if 0%{?fedora} > 22 || 0%{?rhel} > 7 Requires: %{python_name}-systemd %else %if 0%{?fedora} > 20 || 0%{?rhel} > 6 Requires: systemd-python %endif %endif %endif %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: CC-BY-SA-4.0 Summary: Documentation for Pacemaker Group: Documentation BuildArch: noarch %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager. %package schemas License: GPLv2+ Summary: Schemas and upgrade stylesheets for Pacemaker BuildArch: noarch %description schemas Schemas and upgrade stylesheets for Pacemaker Pacemaker is an advanced, scalable High-Availability cluster resource manager. %prep %setup -q -n %{name}-%{commit} %build # Early versions of autotools (e.g. 
RHEL <= 5) do not support --docdir export docdir=%{pcmk_docdir} -export systemdunitdir=%{?_unitdir}%{!?_unitdir:no} +export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no} %if %{with hardening} # prefer distro-provided hardening flags in case they are defined # through _hardening_{c,ld}flags macros, configure script will # use its own defaults otherwise; if such hardenings are completely # undesired, rpmbuild using "--without hardening" # (or "--define '_without_hardening 1'") export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" %endif ./autogen.sh %{configure} \ PYTHON=%{python_path} \ %{!?with_hardening: --disable-hardening} \ %{!?with_legacy_links: --disable-legacy-links} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{!?with_doc: --with-brand=} \ %{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \ --with-initdir=%{_initrddir} \ + --with-runstatedir=%{_rundir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif make %{_smp_mflags} V=1 %check { cts/cts-scheduler --run load-stopped-loop \ && cts/cts-cli \ && touch .CHECKED } 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint [ -f .CHECKED ] && rm -f -- .CHECKED exit $? # TODO remove when rpm<4.14 compatibility irrelevant %install # skip automake-native Python byte-compilation, since RPM-native one (possibly # distro-confined to Python-specific directories, which is currently the only # relevant place, anyway) assures proper intrinsic alignment with wider system # (such as with py_byte_compile macro, which is concurrent Fedora/EL specific) make install \ DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \ %{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon %if %{with upstart_job} mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf %endif %if %{defined _unitdir} mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif # Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # For now, don't package the servicelog-related binaries built only for # ppc64le when certain dependencies are installed. If they get more exercise by # advanced users, we can reconsider. 
rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif # Byte-compile Python sources where suitable and the distro procedures known %if %{defined py_byte_compile} %{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests} %if !%{defined _python_bytecompile_extra} %{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts} %endif %endif %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . -name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %post %if %{defined _unitdir} %systemd_post pacemaker.service %else /sbin/chkconfig --add pacemaker || : %endif %preun %if %{defined _unitdir} %systemd_preun pacemaker.service %else /sbin/service pacemaker stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker || : fi %endif %postun %if %{defined _unitdir} %systemd_postun_with_restart pacemaker.service %endif %pre remote %if %{defined _unitdir} # Stop the service before anything is touched, and remember to restart # it as one of the last actions (compared to using systemd_postun_with_restart, # this avoids suicide when sbd is in use) systemctl --quiet is-active pacemaker_remote if [ $? -eq 0 ] ; then mkdir -p %{_localstatedir}/lib/rpm-state/%{name} touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote systemctl stop pacemaker_remote >/dev/null 2>&1 else rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post remote %if %{defined _unitdir} %systemd_post pacemaker_remote.service %else /sbin/chkconfig --add pacemaker_remote || : %endif %preun remote %if %{defined _unitdir} %systemd_preun pacemaker_remote.service %else /sbin/service pacemaker_remote stop >/dev/null 2>&1 || : if [ "$1" -eq 0 ]; then # Package removal, not upgrade /sbin/chkconfig --del pacemaker_remote || : fi %endif %postun remote %if %{defined _unitdir} # This next line is a no-op, because we stopped the service earlier, but # we leave it here because it allows us to revert to the standard behavior # in the future if desired %systemd_postun_with_restart pacemaker_remote.service # Explicitly take care of removing the flag-file(s) upon final removal if [ "$1" -eq 0 ] ; then rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %posttrans remote %if %{defined _unitdir} if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then systemctl start pacemaker_remote >/dev/null 2>&1 rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote fi %endif %post cli %if %{defined _unitdir} %systemd_post crm_mon.service %endif if [ "$1" -eq 2 ]; then # Package upgrade, not initial install: # Move any pre-2.0 logs to new location to ensure they get rotated { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \ || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker } >/dev/null 2>/dev/null || : fi %preun cli %if %{defined _unitdir} %systemd_preun crm_mon.service %endif %postun cli %if %{defined _unitdir} %systemd_postun_with_restart crm_mon.service %endif %pre -n %{pkgname_pcmk_libs} getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id} getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 
%{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname} exit 0 %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets libs %ldconfig_scriptlets cluster-libs %else %post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %postun -n %{pkgname_pcmk_libs} -p /sbin/ldconfig %post cluster-libs -p /sbin/ldconfig %postun cluster-libs -p /sbin/ldconfig %endif %files ########################################################### %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %if %{defined _unitdir} %{_unitdir}/pacemaker.service %else %{_initrddir}/pacemaker %endif %exclude %{_libexecdir}/pacemaker/cts-log-watcher %exclude %{_libexecdir}/pacemaker/cts-support %exclude %{_sbindir}/pacemaker-remoted %if %{with legacy_links} %exclude %{_sbindir}/pacemaker_remoted %endif %{_libexecdir}/pacemaker/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/fence_legacy %doc %{_mandir}/man7/pacemaker-controld.* %doc %{_mandir}/man7/pacemaker-schedulerd.* %doc %{_mandir}/man7/pacemaker-fenced.* %doc %{_mandir}/man7/ocf_pacemaker_controld.* %doc %{_mandir}/man7/ocf_pacemaker_o2cb.* %doc %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_legacy.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_datadir}/pacemaker/alerts %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine /usr/lib/ocf/resource.d/pacemaker/controld /usr/lib/ocf/resource.d/pacemaker/o2cb /usr/lib/ocf/resource.d/pacemaker/remote %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/pacemaker.conf %config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf %endif %files cli %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon %if %{defined _unitdir} %{_unitdir}/crm_mon.service %endif %if %{with upstart_job} %config(noreplace) %{_sysconfdir}/init/crm_mon.conf %endif %{_sbindir}/attrd_updater %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_error %{_sbindir}/crm_failcount %{_sbindir}/crm_mon %{_sbindir}/crm_node %{_sbindir}/crm_resource %{_sbindir}/crm_rule %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/crm_shadow %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket %{_sbindir}/stonith_admin # "dirname" is owned by -schemas, which is a prerequisite %{_datadir}/pacemaker/report.collector %{_datadir}/pacemaker/report.common # XXX "dirname" is not owned by any prerequisite %{_datadir}/snmp/mibs/PCMK-MIB.txt %exclude /usr/lib/ocf/resource.d/pacemaker/controld %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb %exclude /usr/lib/ocf/resource.d/pacemaker/remote %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %doc %{_mandir}/man7/* %exclude %{_mandir}/man7/pacemaker-controld.* %exclude %{_mandir}/man7/pacemaker-schedulerd.* %exclude %{_mandir}/man7/pacemaker-fenced.* %exclude %{_mandir}/man7/ocf_pacemaker_controld.* %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.* %exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_legacy.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %dir %attr (750, 
%{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles %files -n %{pkgname_pcmk_libs} %{_libdir}/libcib.so.* %{_libdir}/liblrmd.so.* %{_libdir}/libcrmservice.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpacemaker.so.* %{_libdir}/libstonithd.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files cluster-libs %{_libdir}/libcrmcluster.so.* %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files remote %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} # state directory is shared between the subpackets # let rpm take care of removing it once it isn't # referenced anymore and empty %ghost %dir %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote %endif %{_sbindir}/pacemaker-remoted %if %{with legacy_links} %{_sbindir}/pacemaker_remoted %endif %{_mandir}/man8/pacemaker-remoted.* %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files doc %doc %{pcmk_docdir} %license licenses/CC-BY-SA-4.0 %files cts %{python_site}/cts %{_datadir}/pacemaker/tests %{_libexecdir}/pacemaker/cts-log-watcher %{_libexecdir}/pacemaker/cts-support %license licenses/GPLv2 %doc COPYING %doc ChangeLog %files -n %{pkgname_pcmk_libs}-devel %{_includedir}/pacemaker %{_libdir}/*.so %if %{with coverage} %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc %license licenses/LGPLv2.1 %doc COPYING %doc ChangeLog %files schemas %license licenses/GPLv2 %dir %{_datadir}/pacemaker %{_datadir}/pacemaker/*.rng %{_datadir}/pacemaker/*.xsl %{_datadir}/pacemaker/api %{_datadir}/pkgconfig/pacemaker-schemas.pc %changelog * PACKAGE_DATE ClusterLabs PACKAGE_VERSION-1 - See included ChangeLog file for details diff --git a/tools/Makefile.am b/tools/Makefile.am index 477adeb77b..8b83f26f1c 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -1,156 +1,156 @@ # # Copyright 2004-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/Makefile.common if BUILD_SYSTEMD -systemdunit_DATA = crm_mon.service +systemdsystemunit_DATA = crm_mon.service endif noinst_HEADERS = crm_mon.h crm_resource.h pcmkdir = $(datadir)/$(PACKAGE) pcmk_DATA = report.common report.collector sbin_SCRIPTS = crm_report crm_standby crm_master crm_failcount if BUILD_CIBSECRETS sbin_SCRIPTS += cibsecret endif EXTRA_DIST = crm_mon.sysconfig \ crm_mon.8.inc \ stonith_admin.8.inc sbin_PROGRAMS = attrd_updater \ cibadmin \ crmadmin \ crm_simulate \ crm_attribute \ crm_diff \ crm_error \ crm_mon \ crm_node \ crm_resource \ crm_rule \ crm_shadow \ crm_verify \ crm_ticket \ iso8601 \ stonith_admin if BUILD_SERVICELOG sbin_PROGRAMS += notifyServicelogEvent endif if BUILD_OPENIPMI_SERVICELOG sbin_PROGRAMS += ipmiservicelogd endif ## SOURCES # A few tools are just thin wrappers around crm_attribute. # This makes their help get updated when crm_attribute changes # (see Makefile.common). 
MAN8DEPS = crm_attribute crmadmin_SOURCES = crmadmin.c crmadmin_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_error_SOURCES = crm_error.c crm_error_LDADD = $(top_builddir)/lib/common/libcrmcommon.la cibadmin_SOURCES = cibadmin.c cibadmin_LDADD = $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_shadow_SOURCES = crm_shadow.c crm_shadow_LDADD = $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_node_SOURCES = crm_node.c crm_node_LDADD = $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_simulate_SOURCES = crm_simulate.c crm_simulate_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/pacemaker/libpacemaker.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_diff_SOURCES = crm_diff.c crm_diff_LDADD = $(top_builddir)/lib/common/libcrmcommon.la crm_mon_SOURCES = crm_mon.c crm_mon_curses.c crm_mon_print.c crm_mon_runtime.c crm_mon_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(top_builddir)/lib/pacemaker/libpacemaker.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la \ $(CURSESLIBS) crm_verify_SOURCES = crm_verify.c crm_verify_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/pacemaker/libpacemaker.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_attribute_SOURCES = crm_attribute.c crm_attribute_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_resource_SOURCES = crm_resource.c crm_resource_ban.c crm_resource_runtime.c crm_resource_print.c crm_resource_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(top_builddir)/lib/lrmd/liblrmd.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/pacemaker/libpacemaker.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la crm_rule_SOURCES = crm_rule.c crm_rule_LDADD = $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/pengine/libpe_rules.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/common/libcrmcommon.la iso8601_SOURCES = iso8601.c iso8601_LDADD = $(top_builddir)/lib/common/libcrmcommon.la attrd_updater_SOURCES = attrd_updater.c attrd_updater_LDADD = $(top_builddir)/lib/common/libcrmcommon.la crm_ticket_SOURCES = crm_ticket.c crm_ticket_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/pacemaker/libpacemaker.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/common/libcrmcommon.la stonith_admin_SOURCES = stonith_admin.c stonith_admin_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/fencing/libstonithd.la if BUILD_SERVICELOG notifyServicelogEvent_SOURCES = notifyServicelogEvent.c notifyServicelogEvent_CFLAGS = $(SERVICELOG_CFLAGS) notifyServicelogEvent_LDADD = $(top_builddir)/lib/common/libcrmcommon.la $(SERVICELOG_LIBS) endif if BUILD_OPENIPMI_SERVICELOG ipmiservicelogd_SOURCES = ipmiservicelogd.c ipmiservicelogd_CFLAGS = $(OPENIPMI_SERVICELOG_CFLAGS) $(SERVICELOG_CFLAGS) ipmiservicelogd_LDFLAGS = 
$(top_builddir)/lib/common/libcrmcommon.la $(OPENIPMI_SERVICELOG_LIBS) $(SERVICELOG_LIBS) endif CLEANFILES = $(man8_MANS) diff --git a/tools/ipmiservicelogd.c b/tools/ipmiservicelogd.c index 626cb80a0a..d43032dcd0 100644 --- a/tools/ipmiservicelogd.c +++ b/tools/ipmiservicelogd.c @@ -1,609 +1,609 @@ /* * ipmiservicelogd.c * * A program that listens to IPMI events and writes them * out to servicelog. * * Author: International Business Machines, IBM * Mark Hamzy * Author: Intel Corporation * Jeff Zheng * * Original copyright 2009 International Business Machines, IBM - * Later changes copyright 2009-2018 the Pacemaker project contributors + * Later changes copyright 2009-2019 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* gcc -o ipmiservicelogd -g `pkg-config --cflags --libs OpenIPMI OpenIPMIposix servicelog-1` ipmiservicelogd.c */ /* ./ipmiservicelogd smi 0 */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define COMPLEX 1 static os_handler_t *os_hnd; char *getStringExecOutput(const char *const args[]); char *getSerialNumber(void); char *getProductName(void); static void con_usage(const char *name, const char *help, void *cb_data); static void usage(const char *progname); void ipmi2servicelog(struct sl_data_bmc *bmc_data); static int sensor_threshold_event_handler(ipmi_sensor_t * sensor, enum ipmi_event_dir_e dir, enum ipmi_thresh_e threshold, enum ipmi_event_value_dir_e high_low, enum ipmi_value_present_e value_present, unsigned int raw_value, double value, void *cb_data, ipmi_event_t * event); static int sensor_discrete_event_handler(ipmi_sensor_t * sensor, enum ipmi_event_dir_e dir, int offset, int severity, int prev_severity, void *cb_data, ipmi_event_t * event); static void sensor_change(enum ipmi_update_e op, ipmi_entity_t * ent, ipmi_sensor_t * sensor, void *cb_data); static void entity_change(enum ipmi_update_e op, ipmi_domain_t * domain, ipmi_entity_t * entity, void *cb_data); void setup_done(ipmi_domain_t * domain, int err, unsigned int conn_num, unsigned int port_num, int still_connected, void *user_data); char * getStringExecOutput(const char *const args[]) { int rc; pid_t pid; int pipefd[2]; rc = pipe2(pipefd, 0); if (rc == -1) { crm_err("Error: pipe errno = %d", errno); return NULL; } pid = fork(); if (0 < pid) { /* Parent */ int childExitStatus; char serialNumber[256]; ssize_t sizeRead; /* close write end of pipe */ rc = close(pipefd[1]); if (rc == -1) { crm_err("Error: parent 
close (pipefd[1]) = %d", errno); } /* make 0 same as read-from end of pipe */ rc = dup2(pipefd[0], 0); if (rc == -1) { crm_err("Error: parent dup2 (pipefd[0]) = %d", errno); } /* close excess fildes */ rc = close(pipefd[0]); if (rc == -1) { crm_err("Error: parent close (pipefd[0]) = %d", errno); } waitpid(pid, &childExitStatus, 0); if (!WIFEXITED(childExitStatus)) { crm_err("waitpid() exited with an error: status = %d", WEXITSTATUS(childExitStatus)); return NULL; } else if (WIFSIGNALED(childExitStatus)) { crm_err("waitpid() exited due to a signal = %d", WTERMSIG(childExitStatus)); return NULL; } memset(serialNumber, 0, sizeof(serialNumber)); sizeRead = read(0, serialNumber, sizeof(serialNumber) - 1); if (sizeRead > 0) { char *end = serialNumber + strlen(serialNumber) - 1; while (end > serialNumber && (*end == '\n' || *end == '\r' || *end == '\t' || *end == ' ') ) { *end = '\0'; end--; } return strdup(serialNumber); } return NULL; } else if (pid == 0) { /* Child */ /* close read end of pipe */ rc = close(pipefd[0]); if (rc == -1) { crm_err("Error: child close (pipefd[0]) = %d", errno); } /* make 1 same as write-to end of pipe */ rc = dup2(pipefd[1], 1); if (rc == -1) { crm_err("Error: child dup2 (pipefd[1]) = %d", errno); } /* close excess fildes */ rc = close(pipefd[1]); if (rc == -1) { crm_err("Error: child close (pipefd[1]) = %d", errno); } /* execvp() takes (char *const *) for backward compatibility, * but POSIX guarantees that it will not modify the strings, * so the cast is safe */ rc = execvp(args[0], (char *const *) args); if (rc == -1) { crm_err("Error: child execvp = %d", errno); } /* In case of error */ return NULL; } else { /* Error */ crm_err("fork errno = %d", errno); return NULL; } return NULL; } char * getSerialNumber(void) { const char *const dmiArgs[] = { "dmidecode", "--string", "system-serial-number", NULL }; return getStringExecOutput(dmiArgs); } char * getProductName(void) { const char *dmiArgs[] = { "dmidecode", "--string", "system-product-name", NULL }; return getStringExecOutput(dmiArgs); } static void con_usage(const char *name, const char *help, void *cb_data) { printf("%s\n", help); } static void usage(const char *progname) { printf("Usage:\n"); printf(" %s \n", progname); printf(" Where is one of:\n"); ipmi_parse_args_iter_help(con_usage, NULL); } void ipmi2servicelog(struct sl_data_bmc *bmc_data) { servicelog *slog = NULL; struct sl_event sl_event; uint64_t new_id = 0; struct utsname name; char *serial_number = NULL; char *product_name = NULL; int rc; if (uname(&name) == -1) { crm_err("Error: uname failed"); return; } rc = servicelog_open(&slog, 0); /* flags is one of SL_FLAG_xxx */ if (!slog) { crm_err("Error: servicelog_open failed, rc = %d", rc); return; } serial_number = getSerialNumber(); if (serial_number) { if (strlen(serial_number) > 20) { serial_number[20] = '\0'; } } product_name = getProductName(); if (product_name) { if (strlen(product_name) > 20) { product_name[20] = '\0'; } } memset(&sl_event, 0, sizeof(sl_event)); /* *INDENT-OFF* */ sl_event.next = NULL; /* only used if in a linked list */ sl_event.id = 0; /* unique identifier - filled in by API call */ sl_event.time_logged = time (NULL); sl_event.time_event = time (NULL); sl_event.time_last_update = time (NULL); sl_event.type = SL_TYPE_BMC; /* one of SL_TYPE_* */ sl_event.severity = SL_SEV_WARNING; /* one of SL_SEV_* */ sl_event.platform = name.machine; /* ppc64, etc */ sl_event.machine_serial = serial_number; sl_event.machine_model = product_name; /* it may not have the serial # within the 
first 20 chars */ sl_event.nodename = name.nodename; sl_event.refcode = strdup("ipmi"); sl_event.description = strdup("ipmi event"); sl_event.serviceable = 1; /* 1 or 0 */ sl_event.predictive = 0; /* 1 or 0 */ sl_event.disposition = SL_DISP_RECOVERABLE; /* one of SL_DISP_* */ sl_event.call_home_status = SL_CALLHOME_NONE; /* one of SL_CALLHOME_*, only valid if serviceable */ sl_event.closed = 1; /* 1 or 0, only valid if serviceable */ sl_event.repair = 0; /* id of repairing repair_action */ sl_event.callouts = NULL; sl_event.raw_data_len = 0; sl_event.raw_data = NULL; sl_event.addl_data = &bmc_data; /* pointer to an sl_data_* struct */ /* *INDENT-ON* */ rc = servicelog_event_log(slog, &sl_event, &new_id); if (rc != 0) { crm_err("Error: servicelog_event_log, rc = %d (\"%s\")", rc, servicelog_error(slog)); } else { crm_debug("Sending to servicelog database"); } free(sl_event.refcode); free(sl_event.description); free(serial_number); free(product_name); servicelog_close(slog); } static int sensor_threshold_event_handler(ipmi_sensor_t * sensor, enum ipmi_event_dir_e dir, enum ipmi_thresh_e threshold, enum ipmi_event_value_dir_e high_low, enum ipmi_value_present_e value_present, unsigned int raw_value, double value, void *cb_data, ipmi_event_t * event) { ipmi_entity_t *ent = ipmi_sensor_get_entity(sensor); char name[IPMI_ENTITY_NAME_LEN]; struct sl_data_bmc bmc_data; uint32_t sel_id; uint32_t sel_type; uint16_t generator; uint8_t version; uint8_t sensor_type; int sensor_lun; int sensor_number; uint8_t event_class; uint8_t event_type; int direction; ipmi_sensor_get_id(sensor, name, sizeof(name)); ipmi_sensor_get_num(sensor, &sensor_lun, &sensor_number); sel_id = ipmi_entity_get_entity_id(ent); sel_type = ipmi_entity_get_type(ent); generator = ipmi_entity_get_slave_address(ent) | (sensor_lun << 5); /* LUN (2 bits) | SLAVE ADDRESS (5 bits) */ version = 0x04; sensor_type = ipmi_sensor_get_sensor_type(sensor); event_class = 0; /* @TBD - where does this come from? */ event_type = ipmi_event_get_type(event); direction = dir; memset(&bmc_data, 0, sizeof(bmc_data)); bmc_data.sel_id = sel_id; bmc_data.sel_type = sel_type; bmc_data.generator = generator; bmc_data.version = version; bmc_data.sensor_type = sensor_type; bmc_data.sensor_number = sensor_number; bmc_data.event_class = event_class; bmc_data.event_type = event_type; bmc_data.direction = direction; crm_debug("Writing bmc_data (%08x, %08x, %04x, %02x, %02x, %02x, %02x, %02x, %d)", bmc_data.sel_id, bmc_data.sel_type, bmc_data.generator, bmc_data.version, bmc_data.sensor_type, bmc_data.sensor_number, bmc_data.event_class, bmc_data.event_type, bmc_data.direction); ipmi2servicelog(&bmc_data); /* This passes the event on to the main event handler, which does not exist in this program. 
*/ return IPMI_EVENT_NOT_HANDLED; } static int sensor_discrete_event_handler(ipmi_sensor_t * sensor, enum ipmi_event_dir_e dir, int offset, int severity, int prev_severity, void *cb_data, ipmi_event_t * event) { ipmi_entity_t *ent = ipmi_sensor_get_entity(sensor); char name[IPMI_ENTITY_NAME_LEN]; struct sl_data_bmc bmc_data; uint32_t sel_id; uint32_t sel_type; uint16_t generator; uint8_t version; uint8_t sensor_type; int sensor_lun; int sensor_number; uint8_t event_class; uint8_t event_type; int direction; ipmi_sensor_get_id(sensor, name, sizeof(name)); ipmi_sensor_get_num(sensor, &sensor_lun, &sensor_number); sel_id = ipmi_entity_get_entity_id(ent); sel_type = ipmi_entity_get_type(ent); generator = ipmi_entity_get_slave_address(ent) | (sensor_lun << 5); /* LUN (2 bits) | SLAVE ADDRESS (5 bits) */ version = 0x04; sensor_type = ipmi_sensor_get_sensor_type(sensor); event_class = 0; /* @TBD - where does this come from? */ event_type = ipmi_event_get_type(event); direction = dir; memset(&bmc_data, 0, sizeof(bmc_data)); bmc_data.sel_id = sel_id; bmc_data.sel_type = sel_type; bmc_data.generator = generator; bmc_data.version = version; bmc_data.sensor_type = sensor_type; bmc_data.sensor_number = sensor_number; bmc_data.event_class = event_class; bmc_data.event_type = event_type; bmc_data.direction = direction; crm_debug("Writing bmc_data (%08x, %08x, %04x, %02x, %02x, %02x, %02x, %02x, %d)", bmc_data.sel_id, bmc_data.sel_type, bmc_data.generator, bmc_data.version, bmc_data.sensor_type, bmc_data.sensor_number, bmc_data.event_class, bmc_data.event_type, bmc_data.direction); ipmi2servicelog(&bmc_data); /* This passes the event on to the main event handler, which does not exist in this program. */ return IPMI_EVENT_NOT_HANDLED; } /* Whenever the status of a sensor changes, this function is called. We display the information of the sensor if we find a new sensor */ static void sensor_change(enum ipmi_update_e op, ipmi_entity_t * ent, ipmi_sensor_t * sensor, void *cb_data) { int rv; if (op == IPMI_ADDED) { if (ipmi_sensor_get_event_reading_type(sensor) == IPMI_EVENT_READING_TYPE_THRESHOLD) rv = ipmi_sensor_add_threshold_event_handler(sensor, sensor_threshold_event_handler, NULL); else rv = ipmi_sensor_add_discrete_event_handler(sensor, sensor_discrete_event_handler, NULL); if (rv) crm_err("Unable to add the sensor event handler: %x", rv); } } /* Whenever the status of an entity changes, this function is called. When a new entity is created, we search all sensors that belong to the entity */ static void entity_change(enum ipmi_update_e op, ipmi_domain_t * domain, ipmi_entity_t * entity, void *cb_data) { int rv; if (op == IPMI_ADDED) { /* Register callback so that when the status of a sensor changes, sensor_change is called */ rv = ipmi_entity_add_sensor_update_handler(entity, sensor_change, entity); if (rv) { crm_err("ipmi_entity_set_sensor_update_handler: 0x%x", rv); crm_exit(CRM_EX_ERROR); } } } /* After we have established a connection to the domain, this function gets called. At this time, we can do whatever we want to do. Here we want to search all entities in the system */ void setup_done(ipmi_domain_t * domain, int err, unsigned int conn_num, unsigned int port_num, int still_connected, void *user_data) { int rv; /* Register a callback function, entity_change.
When a new entity is created, entity_change is called */ rv = ipmi_domain_add_entity_update_handler(domain, entity_change, domain); if (rv) { crm_err("ipmi_domain_add_entity_update_handler return error: %d", rv); return; } } int main(int argc, char *argv[]) { int rv; int curr_arg = 1; ipmi_args_t *args; ipmi_con_t *con; /* OS handler allocated first. */ os_hnd = ipmi_posix_setup_os_handler(); if (!os_hnd) { crm_err("ipmi_smi_setup_con: Unable to allocate os handler"); crm_exit(CRM_EX_ERROR); } /* Initialize the OpenIPMI library. */ ipmi_init(os_hnd); // Check for pacemaker-standard help and version options if (argc > 1) { for (char **arg = &argv[1]; *arg != NULL; ++arg) { if (!strcmp(*arg, "--help") || !strcmp(*arg, "-?")) { usage(argv[0]); return CRM_EX_OK; } else if (!strcmp(*arg, "--version") || !strcmp(*arg, "-$")) { crm_help('$', CRM_EX_OK); } } } #ifdef COMPLEX rv = ipmi_parse_args2(&curr_arg, argc, argv, &args); if (rv) { crm_err("Error parsing command arguments, argument %d: %s", curr_arg, strerror(rv)); usage(argv[0]); crm_exit(CRM_EX_USAGE); } #endif - crm_make_daemon("ipmiservicelogd", TRUE, "/var/run/ipmiservicelogd.pid0"); + crm_make_daemon("ipmiservicelogd", TRUE, PCMK_RUN_DIR "/ipmiservicelogd.pid0"); crm_log_cli_init("ipmiservicelogd"); // Maybe this should log like a daemon instead? // crm_log_init("ipmiservicelogd", LOG_INFO, TRUE, FALSE, argc, argv, FALSE); #ifdef COMPLEX rv = ipmi_args_setup_con(args, os_hnd, NULL, &con); if (rv) { crm_err("ipmi_ip_setup_con: %s", strerror(rv)); crm_err("Error: Is IPMI configured correctly?"); crm_exit(CRM_EX_ERROR); } #else /* If all you need is an SMI connection, this is all the code you need. */ /* Establish a connection to the domain through the system interface. This function connects the domain, selector, and OS handler together. When there is a response message from the domain, the status of the file descriptor in the selector is changed and the predefined callback is called. After the connection is established, setup_done will be called. */ rv = ipmi_smi_setup_con(0, os_hnd, NULL, &con); if (rv) { crm_err("ipmi_smi_setup_con: %s", strerror(rv)); crm_err("Error: Is IPMI configured correctly?"); crm_exit(CRM_EX_ERROR); } #endif rv = ipmi_open_domain("", &con, 1, setup_done, NULL, NULL, NULL, NULL, 0, NULL); if (rv) { crm_err("ipmi_init_domain: %s", strerror(rv)); crm_exit(CRM_EX_ERROR); } /* This is the main loop of the event-driven program. Try to exit the program */ /* Let the selector code run the select loop. */ os_hnd->operation_loop(os_hnd); /* Technically, we can't get here, but this is an example. */ os_hnd->free_os_handler(os_hnd); return CRM_EX_OK; } diff --git a/xml/Makefile.am b/xml/Makefile.am index 5504d3a489..43ad0a5f0f 100644 --- a/xml/Makefile.am +++ b/xml/Makefile.am @@ -1,258 +1,263 @@ # # Copyright 2004-2019 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/Makefile.common noarch_pkgconfig_DATA = $(builddir)/pacemaker-schemas.pc # Pacemaker has 3 schemas: the CIB schema, the API schema (for command-line # tool XML output), and a legacy schema for crm_mon --as-xml. # # See Readme.md for details on updating CIB schema files (API is similar) # The CIB and crm_mon schemas are installed directly in CRM_SCHEMA_DIRECTORY # for historical reasons, while the API schema is installed in a subdirectory.
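(Editorial aside, not part of this Makefile or of Pacemaker's own sources: the installed .rng files are ordinary RELAX NG schemas, so any RELAX NG-capable validator can consume them. A minimal sketch using libxml2's RelaxNG API is shown below; the schema and document paths are hypothetical examples, not paths this Makefile guarantees.)

/* validate_cib.c - illustrative only; build with something like:
 *   gcc validate_cib.c $(pkg-config --cflags --libs libxml-2.0)
 */
#include <stdio.h>
#include <libxml/parser.h>
#include <libxml/relaxng.h>

int main(void)
{
    /* Hypothetical installed schema and a saved CIB dump */
    const char *schema_path = "/usr/share/pacemaker/pacemaker-3.2.rng";
    const char *doc_path = "cib.xml";
    int rc = 1;

    xmlRelaxNGParserCtxtPtr pctxt = xmlRelaxNGNewParserCtxt(schema_path);
    xmlRelaxNGPtr schema = (pctxt != NULL)? xmlRelaxNGParse(pctxt) : NULL;
    xmlRelaxNGValidCtxtPtr vctxt = (schema != NULL)? xmlRelaxNGNewValidCtxt(schema) : NULL;
    xmlDocPtr doc = xmlReadFile(doc_path, NULL, 0);

    if ((vctxt != NULL) && (doc != NULL)) {
        /* xmlRelaxNGValidateDoc() returns 0 when the document is valid */
        rc = (xmlRelaxNGValidateDoc(vctxt, doc) == 0)? 0 : 1;
        printf("%s: %s\n", doc_path, (rc == 0)? "valid" : "NOT valid");
    } else {
        fprintf(stderr, "could not load schema or document\n");
    }

    if (doc != NULL) xmlFreeDoc(doc);
    if (vctxt != NULL) xmlRelaxNGFreeValidCtxt(vctxt);
    if (schema != NULL) xmlRelaxNGFree(schema);
    if (pctxt != NULL) xmlRelaxNGFreeParserCtxt(pctxt);
    xmlCleanupParser();
    return rc;
}

(From the command line, xmllint --relaxng <schema> <file> performs the same check.)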
APIdir = $(CRM_SCHEMA_DIRECTORY)/api CIBdir = $(CRM_SCHEMA_DIRECTORY) MONdir = $(CRM_SCHEMA_DIRECTORY) # Extract a sorted list of available numeric schema versions # from filenames like NAME-MAJOR[.MINOR][.MINOR-MINOR].rng numeric_versions = $(shell ls -1 $(1) \ | sed -n -e 's/^.*-\([0-9][0-9.]*\).rng$$/\1/p' \ | sort -u -t. -k 1,1n -k 2,2n -k 3,3n) version_pairs = $(join \ $(1),$(addprefix \ -,$(wordlist \ 2,$(words $(1)),$(1) \ ) next \ ) \ ) version_pairs_last = $(wordlist \ $(words \ $(wordlist \ 2,$(1),$(2) \ ) \ ),$(1),$(2) \ ) # Names of API schemas that form the choices for pacemaker-result content API_request_base = command-output stonith_admin version # Names of CIB schemas that form the choices for cib/configuration content CIB_cfg_base = options nodes resources constraints fencing acls tags alerts # Names of all schemas (including top level and those included by others) API_base = $(API_request_base) item status CIB_base = cib $(CIB_cfg_base) status score rule nvset # Static schema files and transforms (only CIB has transforms) # # This is more complicated than it should be due to the need to support # VPATH builds and "make distcheck". We need the absolute paths for reliable # substitution back and forth, and relative paths for distributed files. API_abs_files = $(foreach base,$(API_base),$(wildcard $(abs_srcdir)/api/$(base)*.rng)) CIB_abs_files = $(foreach base,$(CIB_base),$(wildcard $(abs_srcdir)/$(base).rng $(abs_srcdir)/$(base)-*.rng)) CIB_abs_xsl = $(abs_srcdir)/upgrade-1.3.xsl \ $(abs_srcdir)/upgrade-2.10.xsl \ $(wildcard $(abs_srcdir)/upgrade-*enter.xsl) \ $(wildcard $(abs_srcdir)/upgrade-*leave.xsl) MON_abs_files = $(abs_srcdir)/crm_mon.rng API_files = $(foreach base,$(API_base),$(wildcard $(srcdir)/api/$(base)*.rng)) CIB_files = $(foreach base,$(CIB_base),$(wildcard $(srcdir)/$(base).rng $(srcdir)/$(base)-*.rng)) CIB_xsl = $(srcdir)/upgrade-1.3.xsl \ $(srcdir)/upgrade-2.10.xsl \ $(wildcard $(srcdir)/upgrade-*enter.xsl) \ $(wildcard $(srcdir)/upgrade-*leave.xsl) MON_files = $(srcdir)/crm_mon.rng # Sorted lists of all numeric schema versions API_numeric_versions = $(call numeric_versions,${API_files}) CIB_numeric_versions = $(call numeric_versions,${CIB_files}) # The highest numeric schema version API_max ?= $(lastword $(API_numeric_versions)) CIB_max ?= $(lastword $(CIB_numeric_versions)) # Sorted lists of all schema versions (including "next") API_versions = next $(API_numeric_versions) CIB_versions = next $(CIB_numeric_versions) # Build tree locations of static schema files and transforms (for VPATH builds) API_build_copies = $(foreach f,$(API_abs_files),$(subst $(abs_srcdir),$(abs_builddir),$(f))) CIB_build_copies = $(foreach f,$(CIB_abs_files) $(CIB_abs_xsl),$(subst $(abs_srcdir),$(abs_builddir),$(f))) MON_build_copies = $(foreach f,$(MON_abs_files),$(subst $(abs_srcdir),$(abs_builddir),$(f))) # Dynamically generated schema files API_generated = api/api-result.rng $(foreach base,$(API_versions),api/api-result-$(base).rng) CIB_generated = pacemaker.rng $(foreach base,$(CIB_versions),pacemaker-$(base).rng) versions.rng CIB_version_pairs = $(call version_pairs,${CIB_numeric_versions}) CIB_version_pairs_cnt = $(words ${CIB_version_pairs}) CIB_version_pairs_last = $(call version_pairs_last,${CIB_version_pairs_cnt},${CIB_version_pairs}) dist_API_DATA = $(API_files) dist_CIB_DATA = $(CIB_files) $(CIB_xsl) dist_MON_DATA = $(MON_files) nodist_API_DATA = $(API_generated) nodist_CIB_DATA = $(CIB_generated) EXTRA_DIST = Readme.md \ best-match.sh \ cibtr-2.rng \ context-of.xsl 
\ ocf-meta2man.xsl \ regression.sh \ upgrade-2.10-roundtrip.xsl \ upgrade-detail.xsl \ xslt_cibtr-2.rng \ assets \ test-2 \ test-2-enter \ test-2-leave \ test-2-roundtrip cib-versions: @echo "Max: $(CIB_max)" @echo "Available: $(CIB_versions)" api-versions: @echo "Max: $(API_max)" @echo "Available: $(API_versions)" # Dynamically generated top-level API schema api/api-result.rng: api/api-result-$(API_max).rng $(AM_V_at)$(MKDIR_P) api # might not exist in VPATH build $(AM_V_SCHEMA)cp $(top_builddir)/xml/$< $@ api/api-result-%.rng: $(API_build_copies) best-match.sh Makefile.am $(AM_V_at)echo '' > $@ $(AM_V_at)echo '' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)for rng in $(API_request_base); do $(srcdir)/best-match.sh api/$$rng $(*) $(@) " " || :; done $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)$(srcdir)/best-match.sh api/status $(*) $(@) " " || : $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_SCHEMA)echo '' >> $@ # Dynamically generated top-level CIB schema pacemaker.rng: pacemaker-$(CIB_max).rng $(AM_V_SCHEMA)cp $(top_builddir)/xml/$< $@ pacemaker-%.rng: $(CIB_build_copies) best-match.sh Makefile.am $(AM_V_at)echo '' > $@ $(AM_V_at)echo '' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)$(srcdir)/best-match.sh cib $(*) $(@) " " $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)for rng in $(CIB_cfg_base); do $(srcdir)/best-match.sh $$rng $(*) $(@) " " || :; done $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)$(srcdir)/best-match.sh status $(*) $(@) " " $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_SCHEMA)echo '' >> $@ # Dynamically generated CIB schema listing all pacemaker versions versions.rng: Makefile.am $(AM_V_at)echo '' > $@ $(AM_V_at)echo '' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' none' >> $@ $(AM_V_at)echo ' pacemaker-0.6' >> $@ $(AM_V_at)echo ' transitional-0.6' >> $@ $(AM_V_at)echo ' pacemaker-0.7' >> $@ $(AM_V_at)echo ' pacemaker-1.1' >> $@ $(AM_V_at)for rng in $(CIB_versions); do echo " pacemaker-$$rng" >> $@; done $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_at)echo ' ' >> $@ $(AM_V_SCHEMA)echo '' >> $@ # diff fails with ec=2 if no predecessor is found; # this uses '=' GNU extension to sed, if that's not available, # one can use: hline=`echo "$${p}" | grep -Fn "$${hunk}" | cut -d: -f1`; # XXX: use line information from hunk to avoid "not detected" for ambiguity version_diff = \ @for p in $(1); do \ set `echo "$${p}" | tr '-' ' '`; \ echo "\#\#\# *-$$2.rng vs. predecessor"; \ for v in *-$$2.rng; do \ echo "\#\#\#\# $${v} vs. predecessor"; b=`echo "$${v}" | cut -d- -f1`; \ old=`./best-match.sh $${b} $$1`; \ p=`diff -u "$${old}" "$${v}" 2>/dev/null`; \ case $$? 
in \ 1) echo "$${p}" | sed -n -e '/^@@ /!d;=;p' \ -e ':l;n;/^\([- ]\|+.*<[^ />]\+\([^/>]\+="ID\|>$$\)\)/bl;s/^[+ ]\(.*\)/\1/p' \ | while read hline; do \ read h && read i || break; \ iline=`grep -Fn "$${i}" "$${v}" | cut -d: -f1`; \ ctxt="(not detected)"; \ if test `echo "$${iline}" | wc -l` -eq 1; then \ ctxt=`{ sed -n -e "1,$$(($${iline}-1))p" "$${v}"; \ echo "$${i}"; \ sed -n -e "$$(($${iline}+1)),$$ p" "$${v}"; \ } | $(XSLTPROC) --param skip 1 context-of.xsl -`; \ fi; \ echo "$${p}" | sed -n -e "$$(($${hline}-2)),$${hline}!d" \ -e '/^\(+++\|---\)/p'; \ echo "$${h} context: $${ctxt}"; \ echo "$${p}" | sed -n -e "1,$${hline}d" \ -e '/^\(---\|@@ \)/be;p;d;:e;n;be'; \ done; \ ;; \ 2) echo "\#\#\#\#\# $${v} has no predecessor";; \ esac; \ done; \ done diff: best-match.sh @echo "# Comparing changes in + since $(CIB_max)" $(call version_diff,${CIB_version_pairs_last}) fulldiff: best-match.sh @echo "# Comparing all changes across all the subsequent increments" $(call version_diff,${CIB_version_pairs}) CLEANFILES = $(API_generated) $(CIB_generated) +clean-local: + if [ "x$(srcdir)" != "x$(builddir)" ]; then \ + rm -f $(API_build_copies) $(CIB_build_copies) $(MON_build_copies); \ + fi + # Enable ability to use $@ in prerequisite .SECONDEXPANSION: # For VPATH builds, copy the static schema files into the build tree $(API_build_copies) $(CIB_build_copies) $(MON_build_copies): $$(subst $(abs_builddir),$(srcdir),$$(@)) $(AM_V_GEN)if [ "x$(srcdir)" != "x$(builddir)" ]; then \ $(MKDIR_P) "$(dir $(@))"; \ cp "$(<)" "$(@)"; \ fi
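(Closing aside on getStringExecOutput() in tools/ipmiservicelogd.c above: it captures a child's stdout by hand with pipe2(), fork(), dup2(), execvp(), and waitpid(). The sketch below illustrates the same intent -- run dmidecode, read its output, and trim trailing whitespace -- using popen() instead. It is only an illustration under that simplification, not the file's implementation, and it drops the original's explicit fork/exec error reporting.)

/* Illustrative sketch only -- not part of ipmiservicelogd.c. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Run a command line and return its trimmed first line of output
 * (caller frees), or NULL on failure. */
static char *capture_output(const char *cmdline)
{
    char buf[256] = "";
    size_t len;
    FILE *fp = popen(cmdline, "r");

    if (fp == NULL) {
        return NULL;
    }
    if (fgets(buf, sizeof(buf), fp) == NULL) {
        pclose(fp);
        return NULL;
    }
    pclose(fp);

    /* Trim trailing whitespace, as getStringExecOutput() does */
    len = strlen(buf);
    while ((len > 0) && (strchr("\n\r\t ", buf[len - 1]) != NULL)) {
        buf[--len] = '\0';
    }
    return (len > 0)? strdup(buf) : NULL;
}

int main(void)
{
    /* Same dmidecode invocation that getSerialNumber() builds as an argv[] */
    char *serial = capture_output("dmidecode --string system-serial-number");

    printf("serial: %s\n", (serial != NULL)? serial : "(unavailable)");
    free(serial);
    return 0;
}

(The trade-off is that popen() routes the command line through a shell, which the argv[]-based execvp() call in the original avoids.)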